Dataset schema (one row per source file):

| column | dtype | range / classes |
|---|---|---|
| id | int64 | 0 to 458k |
| file_name | string | length 4 to 119 |
| file_path | string | length 14 to 227 |
| content | string | length 24 to 9.96M |
| size | int64 | 24 to 9.96M |
| language | string | 1 class |
| extension | string | 14 classes |
| total_lines | int64 | 1 to 219k |
| avg_line_length | float64 | 2.52 to 4.63M |
| max_line_length | int64 | 5 to 9.91M |
| alphanum_fraction | float64 | 0 to 1 |
| repo_name | string | length 7 to 101 |
| repo_stars | int64 | 100 to 139k |
| repo_forks | int64 | 0 to 26.4k |
| repo_open_issues | int64 | 0 to 2.27k |
| repo_license | string | 12 classes |
| repo_extraction_date | string | 433 classes |
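For orientation, here is a minimal sketch of how a flat export of this table could be loaded and filtered with pandas. The file name code_rows.parquet is a placeholder, not a file that ships with the dataset.

import pandas as pd

# "code_rows.parquet" stands in for an export of the table described above
df = pd.read_parquet("code_rows.parquet")

# keep smaller Python files from reasonably popular GPL-3.0 repositories
subset = df[(df["language"] == "Python")
            & (df["size"] < 20_000)
            & (df["repo_stars"] >= 100)
            & (df["repo_license"] == "GPL-3.0")]
print(subset[["file_name", "repo_name", "total_lines"]].head())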
id: 2,289,900 | file_name: tui.py | file_path: lauratheq_lazywp/src/tui.py
#!/usr/bin/python3
import curses, sys
from curses.textpad import Textbox
from math import floor
def draw_menu_window(lazywp) -> None:
'''
Draws the window for the menu pad
Returns:
void
'''
# set dimensions
height = lazywp.rows - 2
width = 23
# set color based on context
color = lazywp.colors['default']
if lazywp.context == 1:
color = lazywp.colors['context_active']
menu = curses.newwin(height, width, 0, 0)
menu.clear()
menu.attron(color)
menu.box()
menu.addstr(0, 2, " LazyWP ")
lazywp.window.noutrefresh()
menu.refresh()
def draw_menu_pad(lazywp):
'''
Draws the menu pad with the menu entries
Returns:
curses.pad obj
'''
width = 21
height = len(lazywp.menu)
pad = curses.newpad(height, width)
counter = 0
for menu_entry in lazywp.menu:
# set label
label = menu_entry
llabel = label.lower()
fillers = width - 1 - len(label)
label += ' ' * fillers
# set color
color = lazywp.colors['default']
if counter == lazywp.menu_hover:
if lazywp.active_command == llabel:
color = lazywp.colors['menu_active_hover']
else:
color = lazywp.colors['menu_hover']
else:
if lazywp.active_command == llabel:
color = lazywp.colors['menu_active']
else:
color = lazywp.colors['default']
# add label to pad
pad.addstr(counter, 0, label, color)
counter += 1
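    # pad.refresh(pminrow, pmincol, sminrow, smincol, smaxrow, smaxcol):
    # display the pad from its top-left corner inside the screen region
    # starting at row 1, column 2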
pad.refresh(0, 0, 1, 2, height, width)
return pad
def draw_content_window(lazywp) -> None:
'''
Draws the window for the content pad
Returns:
void
'''
# set dimensions
height = lazywp.rows - 2
width = lazywp.cols - 24
# set color based on context
color = lazywp.colors['default']
if lazywp.context == 2:
color = lazywp.colors['context_active']
# set the label based on the current active command
label = lazywp.commands[lazywp.active_command]['label']
content = curses.newwin(height, width, 0, 24)
content.clear()
content.attron(color)
content.box()
content.addstr(0, 2, f" {label} ")
lazywp.window.noutrefresh()
content.refresh()
def draw_content_pad(lazywp):
'''
Adds the content pad to lazywp
Returns:
curses.pad obj
'''
width = lazywp.cols - 26
height = len(lazywp.content)
pad = curses.newpad(height, width)
counter = 0
for line in lazywp.content:
color = lazywp.colors['default']
string = line[0]
if len(line) == 2:
color = lazywp.colors[line[1]]
pad.addstr(counter, 0, string, color)
counter += 1
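    # content_pad_pos is the first pad row to display, which is how scrolling
    # through long content is implemented; the pad is mapped onto the screen
    # area right of the menu (rows 1 to rows-5, columns 26 to cols-2)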
pad.refresh(lazywp.content_pad_pos, 0, 1, 26, lazywp.rows-5, lazywp.cols-2)
return pad
def draw_help_window(lazywp):
'''
    Draws the help window and displays the content
Parameters:
lazywp (obj): the lazywp object
Returns:
void
'''
# set dimensions
height = lazywp.rows - 6
width = lazywp.cols - 20
begin_x = floor(lazywp.cols / 2) - floor(width / 2)
begin_y = floor(lazywp.rows / 2) - floor(height / 2)
# set color
color = lazywp.colors['menu_active_hover']
# build the window
help = curses.newwin(height, width, begin_y, begin_x)
help.clear()
help.attron(color)
help.box()
help.addstr(0, 2, f" Help [esc to close]")
help.refresh()
# build content
content = []
content.append(["Welcome to lazywp - a tui wrapper for wpcli"])
content.append([f"Version: {lazywp.version}"])
content.append([" "])
content.append(["Select menu entry and press [enter]"])
content.append(["Use [tab] to switch between the menu and content"])
content.append(["Press [?] for help"])
content.append(["Press [q] to exit lazywp"])
content.append([" "])
# fetch the commands
for command_module in lazywp.commands:
command_data = lazywp.commands[command_module]
label = command_data['label']
if len(command_data['actions']) != 0:
content.append([f"Keybindings for {label}"])
for binding in command_data['actions']:
content.append([f" [{binding[0]}] {binding[2]}"])
content.append([" "])
# build pad
help_pad_pos = 0
pad_width = width - 2
pad_height = len(content)
pad = curses.newpad(pad_height, pad_width)
counter = 0
for line in content:
color = lazywp.colors['default']
string = line[0]
if len(line) == 2:
color = lazywp.colors[line[1]]
pad.addstr(counter, 0, string, color)
counter += 1
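    # number of content lines that do not fit into the visible help window;
    # used below as the upper bound for scrolling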
hidden_lines = len(content) - height-2
pad.refresh(help_pad_pos, 0, begin_y+2, begin_x+2, height+1, width-2)
# fetch the keys
key = 0
esc = False
while esc != True:
help.refresh()
key = lazywp.window.getch()
# detect esc
if key == 27:
help.clearok(True)
help.clear()
help.refresh()
pad.clearok(True)
pad.clear()
esc = True
# scrolling position
if key == curses.KEY_DOWN:
if help_pad_pos < hidden_lines:
help_pad_pos += 1
elif key == curses.KEY_UP:
if help_pad_pos > 0:
help_pad_pos -= 1
pad.refresh(help_pad_pos, 0, begin_y+2, begin_x+2, height+1, width-2)
def draw_table_header(headers, lazywp) -> list:
'''
    Generates the content lines that simulate a table header,
    padding each column with spaces and underlining it with '-' characters.
Returns:
list: the content
'''
# get the max length of the headers to calc needed space
header_width_values = headers.values()
header_width_sum = sum(header_width_values)
header_amount = len(header_width_values)
spacers = header_amount - 1
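    # width left over for the single flexible column (declared with width 0):
    # the content area minus fixed column widths, separators and borders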
header_flex_width = lazywp.cols - 26 - header_width_sum - spacers - 2
# walk the headers
content = []
formatted_headers = []
formatted_spacers = []
# prepare headers
for header in headers:
# set the width of the flexible header
header_width = headers[header]
if header_width == 0:
header_width = header_flex_width
# add the vertical spacer to the header text
spaces = header_width - len(header)
header_text = header + (" " * spaces)
# add needed spaces to the header
formatted_headers.append(header_text)
header_spacer = '-' * len(header_text)
formatted_spacers.append(header_spacer)
content.append(['|'.join(formatted_headers)])
content.append(['|'.join(formatted_spacers)])
return content
def draw_table_entry(entries, color, lazywp):
'''
Generates a string which simulates table entries.
Parameters:
        entries (dict): maps each column's text to its column width (0 marks the flexible column)
color (str): current set color
lazywp (obj): the lazywp object
Returns:
str: the content
'''
# get the max length of the headers to calc needed space
entry_width_values = entries.values()
entry_width_sum = sum(entry_width_values)
entry_amount = len(entry_width_values)
spacers = entry_amount - 1
entry_flex_width = lazywp.cols - 26 - entry_width_sum - spacers - 2
# walk the headers
formatted_entries = []
for entry in entries:
# set the width of the flexible header
entry_width = entries[entry]
if entry_width == 0:
entry_width = entry_flex_width
# add the vertical spacer to the header text
spaces = entry_width - len(entry)
entry_text = entry + (" " * spaces)
# add needed spaces to the header
formatted_entries.append(entry_text)
return '|'.join(formatted_entries)
def msgbox(lazywp, messages=[]):
'''
Builds a messagebox
Parameters:
lazywp (obj): lazywp
messages (list): list of messages to be displayed
Returns:
void
'''
# remove the box if it exists
if isinstance(lazywp.box, curses.window):
lazywp.box.clearok(True)
lazywp.box.clear()
lazywp.box.refresh()
# calculate needed width and height
base_height = 2
height = len(messages) + base_height
max_width = 0
for message in messages:
if len(message) > max_width:
max_width = len(message)
width = max_width + 2
# center box
begin_x = floor(lazywp.cols / 2) - floor(width / 2)
begin_y = floor(lazywp.rows / 2) - floor(height / 2) - 2
# draw the pad
lazywp.box = curses.newwin(height, width, begin_y, begin_x)
lazywp.box.clear()
lazywp.box.attron(lazywp.colors['messagebox'])
lazywp.box.box()
# add messages
position_y = 1
for message in messages:
lazywp.box.addstr(position_y,1,message)
position_y += 1
lazywp.box.refresh()
def askbox(lazywp, messages=[]):
'''
Builds a messagebox which lets the user confirm their input
Parameters:
lazywp (obj): lazywp
messages (list): list of messages to be displayed
Returns:
void
'''
# remove the box if it exists
if isinstance(lazywp.box, curses.window):
lazywp.box.clearok(True)
lazywp.box.clear()
lazywp.box.refresh()
# calculate needed width and height
base_height = 2
height = len(messages) + base_height
max_width = 0
for message in messages:
if len(message) > max_width:
max_width = len(message)
if max_width < 17:
max_width = 17
width = max_width + 2
# center box
begin_x = floor(lazywp.cols / 2) - floor(width / 2)
begin_y = floor(lazywp.rows / 2) - floor(height / 2) - 2
# draw the pad
lazywp.box = curses.newwin(height, width, begin_y, begin_x)
lazywp.box.clear()
lazywp.box.attron(lazywp.colors['askbox'])
lazywp.box.box()
# add messages
position_y = 1
for message in messages:
lazywp.box.addstr(position_y,1,message)
position_y += 1
key = 0
enter = False
focus = 1
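    # focus 1 = [ Yes ], focus 2 = [ No ]; arrow keys move the focus,
    # enter (key code 10) confirms the highlighted choice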
while enter != True:
# display yes no
if focus == 1:
lazywp.box.addstr(position_y, 2, '[ Yes ]', lazywp.colors['askbox_hover'])
else:
lazywp.box.addstr(position_y, 2, '[ Yes ]', lazywp.colors['askbox'])
if focus == 2:
lazywp.box.addstr(position_y, 10, '[ No ]', lazywp.colors['askbox_hover'])
else:
lazywp.box.addstr(position_y, 10, '[ No ]', lazywp.colors['askbox'])
lazywp.box.refresh()
key = lazywp.window.getch()
# detect input for qbox
if key == curses.KEY_LEFT:
focus = 1
elif key == curses.KEY_RIGHT:
focus = 2
elif key == 10:
enter = True
if focus == 2:
return False
return True
def slinputbox(lazywp, messages=[]):
'''
Builds a messagebox which lets the user input a single line
Parameters:
lazywp (obj): lazywp
messages (list): list of messages to be displayed
Returns:
string the user input
'''
# remove the box if it exists
if isinstance(lazywp.box, curses.window):
lazywp.box.clearok(True)
lazywp.box.clear()
lazywp.box.refresh()
# calculate needed width and height
base_height = 3
height = len(messages) + base_height
max_width = 0
for message in messages:
if len(message) > max_width:
max_width = len(message)
if max_width < 17:
max_width = 17
width = max_width + 2
# center box
begin_x = floor(lazywp.cols / 2) - floor(width / 2)
begin_y = floor(lazywp.rows / 2) - floor(height / 2) - 2
# draw the pad
lazywp.box = curses.newwin(height, width, begin_y, begin_x)
lazywp.box.clear()
lazywp.box.attron(lazywp.colors['inputbox'])
lazywp.box.box()
# add messages
position_y = 1
for message in messages:
lazywp.box.addstr(position_y,1,message)
position_y += 1
input_base = lazywp.box.subwin(1, width-2, begin_y+position_y, begin_x+1)
input_base.clear()
input_base.refresh()
lazywp.box.refresh()
    textbox = Textbox(input_base)
textbox.edit(enter_is_terminate)
message = textbox.gather()
return message
def enter_is_terminate(x):
'''
Callback function for the curses textbox to identify
the 'enter' press and returning the content
Parameters:
x (int): the key pressed
Returns:
int
'''
    # enter (10) detected: translate it to Ctrl-G (7), which Textbox.edit()
    # treats as the terminate key
    if x == 10:
        x = 7
return x
size: 12,805 | language: Python | extension: .py | total_lines: 394 | avg_line_length: 25.408629 | max_line_length: 86 | alphanum_fraction: 0.609758 | repo_name: lauratheq/lazywp | repo_stars: 8 | repo_forks: 1 | repo_open_issues: 0 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 10:48:43 PM (Europe/Amsterdam)

id: 2,289,901 | file_name: dashboard.py | file_path: lauratheq_lazywp/src/dashboard.py
#!/usr/bin/python3
def get_content(lazywp) -> list:
content = []
content.append([" __ ___ ______ ___ _____"])
content.append([" / / / _ /_ /\\ \\/ / | /| / / _ \\ "])
content.append([" / /__/ __ |/ /_ \\ /| |/ |/ / ___/"])
content.append(["/____/_/ |_/___/ /_/ |__/|__/_/ "])
content.append([" "])
content.append(["Welcome to lazywp - a tui wrapper for wpcli"])
content.append([f"Version: {lazywp.version}"])
content.append([" "])
content.append(["Select menu entry and press [enter]"])
content.append(["Use [tab] to switch between the menu and content"])
content.append(["Press [?] for help"])
content.append(["Press [q] to exit lazywp"])
content.append([" "])
content.append(["Visit https://github.com/lauratheq/lazywp for more information and contributing"])
return content
size: 861 | language: Python | extension: .py | total_lines: 18 | avg_line_length: 42.944444 | max_line_length: 103 | alphanum_fraction: 0.544803 | repo_name: lauratheq/lazywp | repo_stars: 8 | repo_forks: 1 | repo_open_issues: 0 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 10:48:43 PM (Europe/Amsterdam)

id: 2,289,902 | file_name: plugins.py | file_path: lauratheq_lazywp/src/commands/plugins.py
#!/usr/bin/python3
import json
def config():
return {
'label': 'Plugins',
'menu': 'Plugins',
'actions': [
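            # each action entry: [key, handler function name, help text]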
['a', 'toggle_activation', 'Toggle activation of a plugin'],
['i', 'install_plugin', 'Install new plugin'],
['r', 'deinstall_plugin', 'Deinstalls and removes a plugin'],
['u', 'update_plugin', 'Update plugin'],
['U', 'update_all_plugins', 'Update all plugins'],
['t', 'toggle_autoupdate', 'Toggle Autoupdate'],
            ['v', 'verify_plugin', 'Verify plugin against wp.org']
],
'statusbar': [
'a: de/active',
'i: install',
'r: remove',
'u: update (U: all)',
]
}
def get_content(lazywp) -> list:
'''
Builds the basic content for the plugins view
Parameters:
lazywp (obj): the lazywp object
returns:
list: the content to be drawn
'''
# set defaults
content = []
lazywp.wp("plugin list --format=json")
plugins = lazywp.wp_output
plugins = json.loads(plugins)
# check if plugins exists
if len(plugins) == 0:
return [['No plugins found.']]
# build the table header
lazywp.has_header = True
headers = lazywp.tui.draw_table_header({
'Name': 0,
'Status': 8,
'Version': 10,
'Update Available': 17,
'AU': 3
}, lazywp)
content += headers
# set local holder
active_plugin = plugins[lazywp.cursor_position]
lazywp.command_holder['active_plugin'] = active_plugin
# walk the plugins
plugin_counter = 0
for plugin in plugins:
color = 'entry_default'
if lazywp.cursor_position == plugin_counter:
color = 'entry_hover'
if plugin['update'] == 'available':
color = 'entry_active'
if lazywp.cursor_position == plugin_counter:
color = 'entry_active_hover'
line = lazywp.tui.draw_table_entry({
plugin['name']: 0,
plugin['status']: 8,
plugin['version']: 10,
plugin['update']: 17,
plugin['auto_update']: 3
}, color, lazywp)
content.append([line, color])
plugin_counter += 1
return content
def toggle_activation(lazywp, data):
'''
Toggles the activation of a plugin
Parameters:
lazywp (obj): the lazywp object
data (dict): the transfer data dict
Returns:
void
'''
lazywp.reload_content = True
if data['active_plugin']['status'] == 'inactive':
lazywp.msgbox([f"Activating plugin {data['active_plugin']['name']}"])
lazywp.wp(f"plugin activate {data['active_plugin']['name']}", False)
elif data['active_plugin']['status'] == 'active':
lazywp.msgbox([f"Deactivating plugin {data['active_plugin']['name']}"])
lazywp.wp(f"plugin deactivate {data['active_plugin']['name']}", False)
def install_plugin(lazywp, data):
'''
Asks a user for a plugin which needs to be installed
Parameters:
lazywp (obj): the lazywp object
data (dict): the transfer data dict
Returns:
void
'''
plugin = lazywp.slinputbox([f"Please enter the slug of the plugin you want to install"])
lazywp.msgbox([f"Downloading plugin {plugin}"])
lazywp.wp(f"plugin install {plugin}", False)
lazywp.reload_content = True
def deinstall_plugin(lazywp, data):
'''
Deinstalls a plugin
Parameters:
lazywp (obj): the lazywp object
data (dict): the transfer data dict
Returns:
void
'''
result = lazywp.askbox([f"Are you sure you want to delete {data['active_plugin']['name']}?"])
if result == True:
lazywp.reload_content = True
lazywp.cursor_position = 0
lazywp.msgbox([f"Deleting plugin {data['active_plugin']['name']}"])
lazywp.wp(f"plugin delete {data['active_plugin']['name']}", False)
def update_plugin(lazywp, data):
'''
Updates a plugin
Parameters:
lazywp (obj): the lazywp object
data (dict): the transfer data dict
Returns:
void
'''
lazywp.reload_content = True
lazywp.msgbox([f"Updating plugin {data['active_plugin']['name']}"])
lazywp.wp(f"plugin update {data['active_plugin']['name']}", False)
def update_all_plugins(lazywp, data):
'''
Updates all plugins
Parameters:
lazywp (obj): the lazywp object
data (dict): the transfer data dict
Returns:
void
'''
lazywp.reload_content = True
lazywp.msgbox([f"Updating all plugins"])
lazywp.wp(f"plugin update --all", False)
def toggle_autoupdate(lazywp, data):
'''
Toggles the autoupdate of a plugin
Parameters:
lazywp (obj): the lazywp object
data (dict): the transfer data dict
Returns:
void
'''
lazywp.reload_content = True
if data['active_plugin']['auto_update'] == 'off':
lazywp.msgbox([f"Activating autoupdate for plugin {data['active_plugin']['name']}"])
lazywp.wp(f"plugin auto-updates enable {data['active_plugin']['name']}", False)
elif data['active_plugin']['auto_update'] == 'on':
lazywp.msgbox([f"Deactivating autoupdate for plugin {data['active_plugin']['name']}"])
lazywp.wp(f"plugin auto-updates disable {data['active_plugin']['name']}", False)
def verify_plugin(lazywp, data):
lazywp.log.debug('verify_plugin')
size: 5,485 | language: Python | extension: .py | total_lines: 157 | avg_line_length: 27.649682 | max_line_length: 97 | alphanum_fraction: 0.603439 | repo_name: lauratheq/lazywp | repo_stars: 8 | repo_forks: 1 | repo_open_issues: 0 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 10:48:43 PM (Europe/Amsterdam)

id: 2,289,903 | file_name: themes.py | file_path: lauratheq_lazywp/src/commands/themes.py
#!/usr/bin/python3
import json
def config():
return {
'label': 'Themes',
'menu': 'Themes',
'actions': [
['a', 'toggle_activation', 'Toggle activation of a theme'],
['i', 'install_theme', 'Install new theme'],
['r', 'deinstall_theme', 'Deinstalls and removes a theme'],
['u', 'update_theme', 'Update theme'],
['U', 'update_all_themes', 'Update all themes'],
['t', 'toggle_autoupdate', 'Toggle Autoupdate'],
            ['v', 'verify_theme', 'Verify theme against wp.org']
],
'statusbar': [
'a: de/active',
'i: install',
'r: remove',
'u: update (U: all)',
]
}
def get_content(lazywp) -> list:
'''
Builds the basic content for the themes view
Parameters:
lazywp (obj): the lazywp object
returns:
list: the content to be drawn
'''
# set defaults
content = []
lazywp.wp("theme list --format=json")
themes = lazywp.wp_output
themes = json.loads(themes)
# check if themes exists
if len(themes) == 0:
return [['No themes found.']]
# build the table header
lazywp.has_header = True
headers = lazywp.tui.draw_table_header({
'Name': 0,
'Status': 8,
'Version': 10,
'Update Available': 17,
'AU': 3
}, lazywp)
content += headers
# set local holder
active_theme = themes[lazywp.cursor_position]
lazywp.command_holder['active_theme'] = active_theme
# walk the themes
theme_counter = 0
for theme in themes:
color = 'entry_default'
if lazywp.cursor_position == theme_counter:
color = 'entry_hover'
if theme['update'] == 'available':
color = 'entry_active'
if lazywp.cursor_position == theme_counter:
color = 'entry_active_hover'
line = lazywp.tui.draw_table_entry({
theme['name']: 0,
theme['status']: 8,
theme['version']: 10,
theme['update']: 17,
theme['auto_update']: 3
}, color, lazywp)
content.append([line, color])
theme_counter += 1
return content
def toggle_activation(lazywp, data):
'''
Toggles the activation of a theme
Parameters:
lazywp (obj): the lazywp object
data (dict): the transfer data dict
Returns:
void
'''
lazywp.reload_content = True
if data['active_theme']['status'] == 'inactive':
lazywp.msgbox([f"Activating theme {data['active_theme']['name']}"])
lazywp.wp(f"theme activate {data['active_theme']['name']}", False)
elif data['active_theme']['status'] == 'active':
lazywp.msgbox([f"Deactivating theme {data['active_theme']['name']}"])
lazywp.wp(f"theme deactivate {data['active_theme']['name']}", False)
def install_theme(lazywp, data):
'''
Asks a user for a theme which needs to be installed
Parameters:
lazywp (obj): the lazywp object
data (dict): the transfer data dict
Returns:
void
'''
theme = lazywp.slinputbox([f"Please enter the slug of the theme you want to install"])
lazywp.msgbox([f"Downloading theme {theme}"])
lazywp.wp(f"theme install {theme}", False)
lazywp.reload_content = True
def deinstall_theme(lazywp, data):
'''
Deinstalls a theme
Parameters:
lazywp (obj): the lazywp object
data (dict): the transfer data dict
Returns:
void
'''
result = lazywp.askbox([f"Are you sure you want to delete {data['active_theme']['name']}?"])
if result == True:
lazywp.reload_content = True
lazywp.cursor_position = 0
lazywp.msgbox([f"Deleting theme {data['active_theme']['name']}"])
lazywp.wp(f"theme delete {data['active_theme']['name']}", False)
def update_theme(lazywp, data):
'''
Updates a theme
Parameters:
lazywp (obj): the lazywp object
data (dict): the transfer data dict
Returns:
void
'''
lazywp.reload_content = True
lazywp.msgbox([f"Updating theme {data['active_theme']['name']}"])
lazywp.wp(f"theme update {data['active_theme']['name']}", False)
def update_all_themes(lazywp, data):
'''
Updates all themes
Parameters:
lazywp (obj): the lazywp object
data (dict): the transfer data dict
Returns:
void
'''
lazywp.reload_content = True
lazywp.msgbox([f"Updating all themes"])
lazywp.wp(f"theme update --all", False)
def toggle_autoupdate(lazywp, data):
'''
Toggles the autoupdate of a theme
Parameters:
lazywp (obj): the lazywp object
data (dict): the transfer data dict
Returns:
void
'''
lazywp.reload_content = True
if data['active_theme']['auto_update'] == 'off':
lazywp.msgbox([f"Activating autoupdate for theme {data['active_theme']['name']}"])
lazywp.wp(f"theme auto-updates enable {data['active_theme']['name']}", False)
elif data['active_theme']['auto_update'] == 'on':
lazywp.msgbox([f"Deactivating autoupdate for theme {data['active_theme']['name']}"])
lazywp.wp(f"theme auto-updates disable {data['active_theme']['name']}", False)
def verify_theme(lazywp, data):
lazywp.log.debug('verify_theme')
size: 5,398 | language: Python | extension: .py | total_lines: 157 | avg_line_length: 27.095541 | max_line_length: 96 | alphanum_fraction: 0.596811 | repo_name: lauratheq/lazywp | repo_stars: 8 | repo_forks: 1 | repo_open_issues: 0 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 10:48:43 PM (Europe/Amsterdam)

id: 2,289,904 | file_name: setup.py | file_path: gmp007_PropertyExtractor/setup.py
"""
PropertyExtractor -- LLM-based model to extract material property from unstructured dataset
This program is free software; you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation
version 3 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
Copyright: Chinedu Ekuma - 2024
[email protected]
"""
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
try:
from importlib.metadata import version # Python 3.8+
except ImportError:
from importlib_metadata import version # Python <3.8
#from setuptools import setup
setup()
size: 892 | language: Python | extension: .py | total_lines: 21 | avg_line_length: 39.47619 | max_line_length: 92 | alphanum_fraction: 0.793743 | repo_name: gmp007/PropertyExtractor | repo_stars: 8 | repo_forks: 0 | repo_open_issues: 0 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 10:48:43 PM (Europe/Amsterdam)

id: 2,289,905 | file_name: readin.py | file_path: gmp007_PropertyExtractor/src/readin.py
"""
PropertyExtract -- LLM-based model to extract material property from unstructured dataset
This program is free software; you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation
version 3 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
Email: [email protected]
"""
import os
import sys
import spacy
from writeout import write_default_inputs,write_additionalprompts,write_default_keywords,print_default_input_message,write_prep_keyword_prompts
def get_version():
try:
from importlib.metadata import version # Python 3.8+
return version("PropertyExtract")
except ImportError:
try:
from importlib_metadata import version # Python <3.8
return version("PropertyExtract")
except ImportError:
import pkg_resources
return pkg_resources.get_distribution("PropertyExtract").version
def load_additional_prompts(filename):
if os.path.exists(filename):
try:
with open(filename, "r", encoding='utf-8') as file:
return file.read().strip()
except IOError as e:
print(f"An error occurred while reading the file: {e}")
else:
print(f"You have not provided additional prompt file, '{filename}'. Make sure that's what you want!")
return ""
def read_input():
"""
Read the application configuration from an input file and environment variables.
Returns:
- Dictionary containing configuration settings.
"""
model_type = os.getenv('MODEL_TYPE', 'gemini').lower()
api_key_env_var = 'GEMINI_PRO_API_KEY' if model_type == 'gemini' else 'OPENAI_API_KEY'
api_key = os.getenv(api_key_env_var)
if not api_key:
print(f"API key for {model_type} is not set.")
print(f"Please set it as: export {api_key_env_var}='your_actual_api_key_here'")
sys.exit(1)
cwd = os.getcwd()
ppt_extract_exist = os.path.exists(os.path.join(cwd, "extract.in"))
run_mode_flag = (len(sys.argv) > 1 and sys.argv[1] == "-0")
if run_mode_flag and not ppt_extract_exist:
write_default_inputs(cwd)
write_additionalprompts(cwd)
write_default_keywords(cwd)
write_prep_keyword_prompts(cwd)
print_default_input_message()
sys.exit(0)
"""
Read the stress component options from the 'extract.in' file.
...
rotation = on/off
...
Returns:
- Dictionary of the component settings.
"""
options = {
'api_key': api_key,
'model_type': 'gemini',
'model_name': 'gemini-pro',
'property_name': 'thickness',
'property_unit': 'Angstrom',
'temperature': 0.0,
'top_p': 0.95,
'max_output_tokens': 80,
'use_keywords': False,
'additional_prompts': None,
'inputfile_name': None,
'outputfile_name': None,
'column_name': 'Text',
}
try:
with open("extract.in", "r") as f:
lines = f.readlines()
# previous_line = None
for line in lines:
line = line.strip()
if line.startswith("#") or not line:
#previous_line = line.strip()
continue
key, value = line.strip().split('=', 1)
key = key.strip()
value = value.strip()
if key in ["inputfile_name", "outputfile_name", "additional_prompts", "column_name", "property_name"]:
options[key] = value
elif key in ["model_type","model_name", "property_unit"]:
options[key] = value.lower()
elif key in ["use_ml_model", "use_keywords"]:
options[key] = value.lower() in ['true', 'yes', '1','on']
elif key in options:
if key in ['temperature','top_p','max_output_tokens']:
options[key] = float(value)
else:
options[key] = value.lower() == 'on'
else:
#options['custom_options'][key] = value
options.setdefault('custom_options', {})[key] = value
#if options.get('job_submit_command'):
# os.environ["ASE_VASP_COMMAND"] = options['job_submit_command']
except FileNotFoundError:
print("'extract.in' file not found. Using default settings.")
#model_type = options.get("model_type")
run_mode_flag = (len(sys.argv) > 1 and sys.argv[1] == "-0") #and 'dimensional' in options
if run_mode_flag and ppt_extract_exist:
write_default_inputs(cwd)
print_default_input_message()
sys.exit(0)
return options
def ensure_spacy_model(model_name="en_core_web_sm"):
"""Ensure that the spaCy model is downloaded and available."""
try:
# Try to load the model to see if it's already installed
spacy.load(model_name)
print(f"Model {model_name} is already installed.")
except OSError:
# If the model isn't installed, download it
print(f"Downloading the spaCy model {model_name}...")
spacy.cli.download(model_name)
def configure_api(model_type):
if model_type.lower() == 'gemini':
import google.generativeai as genai
gemini_pro_api_key = os.getenv('GEMINI_PRO_API_KEY')
if not gemini_pro_api_key:
print("GEMINI_PRO_API_KEY environment variable not set.")
sys.exit(1)
try:
genai.configure(api_key=gemini_pro_api_key)
print("Model Configured to use Gemini Pro with provided API key.")
except Exception as e:
print(f"Failed to configure Gemini Pro: {e}")
sys.exit(1)
elif model_type.lower() == 'chatgpt':
gpt4_api_key = os.getenv('OPENAI_API_KEY')
if not gpt4_api_key:
print("OPENAI_API_KEY environment variable not set.")
sys.exit(1)
os.environ["OPENAI_API_KEY"] = gpt4_api_key
print("Configured for OpenAI with provided API key.")
else:
print("Invalid model type specified. Please choose 'gemini' or 'chatgpt'.")
sys.exit(1)
def configure_apiold(model_type, api_key):
if model_type.lower() == 'gemini':
import google.generativeai as genai
try:
genai.configure(api_key=api_key)
print("Model Configured to use Gemini Pro with provided API key.")
except Exception as e:
print(f"Failed to configure Gemini Pro: {e}")
sys.exit(1)
elif model_type.lower() == 'chatgpt':
os.environ["OPENAI_API_KEY"] = api_key
print("Configured for ChatGPT with provided API key.")
else:
print("Invalid model type specified. Please choose 'gemini' or 'chatgpt'.")
sys.exit(1)
size: 7,189 | language: Python | extension: .py | total_lines: 166 | avg_line_length: 33.614458 | max_line_length: 143 | alphanum_fraction: 0.6112 | repo_name: gmp007/PropertyExtractor | repo_stars: 8 | repo_forks: 0 | repo_open_issues: 0 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 10:48:43 PM (Europe/Amsterdam)

id: 2,289,906 | file_name: propertyextractor.py | file_path: gmp007_PropertyExtractor/src/propertyextractor.py
"""
PropertyExtractor -- LLM-based model to extract material property from unstructured dataset
This program is free software; you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation
version 3 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
Email: [email protected]
"""
#@title Multishot and Dynamical LLM
#Multishot dynamical approach - Gemini
import pandas as pd
import re
import os
import csv
import time
import json
import traceback
from datetime import datetime
import logging
import random
import warnings  # used for non-fatal warnings in load_keywords()
from textblob import TextBlob
import spacy
#from googleapiclient.errors import HttpError
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import google.generativeai as genai
from google.generativeai import GenerationConfig
import openai
from openai import OpenAI
#from openai.error import OpenAIError
#from openai.error import RateLimitError
from chardet import detect
from urllib3.exceptions import MaxRetryError, ReadTimeoutError
from mendeleev import element as get_element
import nltk
from nltk.tokenize import sent_tokenize
nltk.download('punkt')
nlp = spacy.load("en_core_web_sm")
import backoff #Solve "HTTP/1.1 429 Too Many Requests"
class PropertyExtractor:
def __init__(self, property_name, property_unit, model_name='gemini-pro', model_type='gemini', temperature=0.0, top_p =0.95, max_output_tokens=80, additional_prompts=None,keyword_filepath=None,prep_keyword_path=None):
self.property = property_name.lower() #self._create_property_regex(property_name)
self.unit_conversion = property_unit
self.additional_prompts = additional_prompts
self.model_type = model_type.lower()
self.model_name = model_name
self.temperature = temperature
self.top_p = top_p
self.max_output_tokens = max_output_tokens
self.recent_analyses = [] # To store recent analyses for a multi-shot approach
self.keyword_filepath = keyword_filepath
self.keywords = self.load_keywords() or {}
self.keywords_str = self._build_keywords()
self.prep_keyword_path = prep_keyword_path
if self.prep_keyword_path:
print("Post processing keywords read successfully")
if self.load_keywords():
print("Keywords read successfully!")
if self.additional_prompts:
print("Additional prompts read successfully!")
if self.model_type == 'gemini':
self.model = genai.GenerativeModel(self.model_name)
self.generation_config = GenerationConfig(
stop_sequences= None, # ["Keywords:", "Introduction", "Conclusion"], # Adjust as necessary
temperature=self.temperature, # Balanced for precision and flexibility
top_p=self.top_p, # Broad yet focused selection of tokens
top_k=50, # Considers a wide range of possible tokens
candidate_count=1, # Single, well-considered response (increase if comparison needed)
max_output_tokens=self.max_output_tokens # Enough for detailed responses and explanations
)
elif self.model_type == 'chatgpt':
self.model = OpenAI()
else:
raise ValueError("Unsupported model type provided. Use 'gemini' or 'gpt-4'.")
self.session = requests.Session()
retries = Retry(
total=5,
backoff_factor=1.5,
status_forcelist=[429, 500, 502, 503, 504],
allowed_methods=frozenset(['GET', 'POST'])
)
self.session.mount('https://', HTTPAdapter(max_retries=retries))
self.elements = {}
#{
#'H': 1, 'He': 2, 'Li': 3, 'Be': 4, 'B': 5, 'C': 6, 'N': 7, 'O': 8, 'F': 9, 'Ne': 10,
#'Na': 11, 'Mg': 12, 'Al': 13, 'Si': 14, 'P': 15, 'S': 16, 'Cl': 17, 'Ar': 18, 'K': 19, 'Ca': 20,
#'Sc': 21, 'Ti': 22, 'V': 23, 'Cr': 24, 'Mn': 25, 'Fe': 26, 'Co': 27, 'Ni': 28, 'Cu': 29, 'Zn': 30,
#'Ga': 31, 'Ge': 32, 'As': 33, 'Se': 34, 'Br': 35, 'Kr': 36, 'Rb': 37, 'Sr': 38, 'Y': 39, 'Zr': 40,
#'Nb': 41, 'Mo': 42, 'Tc': 43, 'Ru': 44, 'Rh': 45, 'Pd': 46, 'Ag': 47, 'Cd': 48, 'In': 49, 'Sn': 50,
#'Sb': 51, 'Te': 52, 'I': 53, 'Xe': 54, 'Cs': 55, 'Ba': 56, 'La': 57, 'Ce': 58, 'Pr': 59, 'Nd': 60,
#'Pm': 61, 'Sm': 62, 'Eu': 63, 'Gd': 64, 'Tb': 65, 'Dy': 66, 'Ho': 67, 'Er': 68, 'Tm': 69, 'Yb': 70,
#'Lu': 71, 'Hf': 72, 'Ta': 73, 'W': 74, 'Re': 75, 'Os': 76, 'Ir': 77, 'Pt': 78, 'Au': 79, 'Hg': 80,
#'Tl': 81, 'Pb': 82, 'Bi': 83, 'Po': 84, 'At': 85, 'Rn': 86, 'Fr': 87, 'Ra': 88, 'Ac': 89, #'Th': 90,
#'Pa': 91, 'U': 92, 'Np': 93, 'Pu': 94, 'Am': 95, 'Cm': 96, 'Bk': 97, 'Cf': 98, 'Es': 99, 'Fm': 100,
#'Md': 101, 'No': 102, 'Lr': 103, 'Rf': 104, 'Db': 105, 'Sg': 106, 'Bh': 107, 'Hs': 108, 'Mt': 109,
#'Ds': 110, 'Rg': 111, 'Cn': 112, 'Nh': 113, 'Fl': 114, 'Mc': 115, 'Lv': 116, 'Ts': 117, 'Og': 118
#}
def load_keywords(self):
"""Load keywords from a JSON file, checking if the file is in the current directory."""
try:
            if not self.keyword_filepath or not os.path.exists(self.keyword_filepath):
warnings.warn(f"Keyword file not found in the current working directory: {self.keyword_filepath}", UserWarning)
return None
with open(self.keyword_filepath, "r", encoding="utf-8") as file:
return json.load(file)
except json.JSONDecodeError as e:
warnings.warn(f"Error decoding JSON from the file: {e}", UserWarning)
except IOError as e:
warnings.warn(f"IO error occurred while reading the file: {e}", UserWarning)
return None
def _create_property_regex(self, property_name):
pattern = re.sub(r'\s+', r'\\s*-?\\s*', property_name.lower())
return re.compile(r'\b' + pattern + r'\b', re.IGNORECASE)
def _build_prompt(self):
"""Builds an interactive and conditional analysis prompt for extracting material properties from scientific texts."""
def generate_property_pattern(keyword):
parts = re.split(r'[- ]', keyword)
pattern = r'\b'
for part in parts:
pattern += f'{part}[ -]?'
pattern += r's?\b'
return pattern
property_pattern = generate_property_pattern(self.property)
property_regex = re.compile(property_pattern, re.IGNORECASE)
additional_instructions = self.__additional_instructions() #self.additional_prompts if self.additional_prompts else "No additional instructions provided."
formula_standardization_checks = self._formula_standardization()
standardized_formulas = self.get_standardized_formulas()
system_prompt = f'''
TASK DESCRIPTION:
Analyze scientific literature to identify materials and their corresponding property related to the "{self.property}". Focus on recognizing all variations of the "{self.property}" and explicitly stated numerical values and
optionally, the method used in obtaining the "{self.property}" value (e.g., measured using AFM method). If method is not provided report "None".
ADDITIONAL INSTRUCTIONS:
{additional_instructions}
FORMULA STANDARDIZATION:
{formula_standardization_checks}{self._unit_conversion()}\n\n
Use the list below as a sample of how to convert material names and complex structures to standardized formulas: {standardized_formulas}
PROCESS FLOW:
1. Strictly confirm the presence of the primary keyword "{self.property}":
            - Check the text and tables for "{self.property}" using regex patterns and structured data extraction techniques. Be explicit to capture any variations (case variation, plural form, abbreviation, etc.).
- If "YES" ("{self.property}" found), proceed to STEP 2.
- If "NO" ("{self.property}" not found), log the absence and halt further analysis for this text.
2. Extract values, units, and methods associated with "{self.property}":
- Identify all instances where "{self.property}" and its numerical values with units appear in the current text, ignoring any inferred or unrelated data.
- (Be very strict): Check and verify if "{self.property}" is listed in a table. [Respond with "Yes" or "No" only]. If "Yes", focus specifically on columns that list "{self.property}" explicitly or implicitly as headers. Extract "{self.property}" and the unit from corresponding columns using contextual clues within the table.
- Separate entries for each material and "{self.property}" value.
- (Be strict). Is the [extracted value] of "{self.property}" for the given material correct? Be very strict. [Answer "Yes" or "No" only]. If "No", halt processing present text.
- (Be strict). If [extracted value] of "{self.property}" has [error value] to it, [e.g., 13 ± 1], get the average; if "{self.property}" is given as range, [e.g., 1-3.5], create separate entries for each range
3. Strict Verification of Property Values, Keywords, and Chemical Formulae:
- Keyword Association Check: Confirm whether the extracted "{self.property}" value is associated with any of the keywords "{self.keywords_str}" in the text. Consider the weights, which indicate importance and synonyms of the keywords:
- "{self.keywords_str}"
- Accuracy Confirmation (Be very strict): Verify if the "{self.property}" value [extracted value] is accurate for the identified material in this text. [Respond with "Yes" or "No"]. A "No" response requires re-evaluation of
the data extraction process.
- Chemical Formula Validation (Be very strict): Ensure the identified chemical formula for the material matches the "{self.property}" value in this text. Is the formula correct? [Respond with "Yes" or "No"]. If "No",
reassess and validate the formula accuracy.
- Unique Entry Creation (Be very strict): You seem to have created an entry for non-existing "{self.property}" and [chemical formula] in the present text. Do not create such entry.
- Uniqueness and IUPAC Standards Compliance: Does the extracted material have a chemical formula that is uniquely identifiable and compliant with IUPAC standards? Respond with "Yes" or "No". If "No", skip creating entry
for this material
4. Strict Validation of Compound Associations, Property Values, and Units:
- Confirm the compound and "{self.property}" association: "There is a possibility that the data you extracted is incorrect. [Answer 'Yes' or 'No' only.] Be very strict. Is ',' the ',' compound for which the value of +"{self.property}"+ is given in the following text? Make sure it is a real compound."
- Validate the value and "{self.property}": "There is a possibility that the data you extracted is incorrect. [Answer 'Yes' or 'No' only.] Be very strict. Is ',' the value of the "{self.property}" for the ',' compound in the following text?" If "No", halt processing present text.
- Verify the unit of "{self.property}": "There is a possibility that the data you extracted is incorrect. [Answer 'Yes' or 'No' only.] Be very strict. Is ',' the unit of the ',' value of "{self.property}" in the following text?" If "No", halt processing present text.
5. Compile a structured entry for each verified material, property value, original unit, and method:
- Convert "{self.property}" values strictly to "{self.unit_conversion}".
- Format entry as: Material [Chemical formula], "{self.property} (Converted): [value] (Original Unit: [original unit])", Method [method]
EXAMPLES:
- Correct: Material: Material, "{self.property} (Converted): value (Original Unit: {self.unit_conversion})", Method: Method
- Incorrect (missing unit): Material: Material, "{self.property}: value", Method: Method
REMINDER:
(Be very strict): Ensure all information is accurate and complete. Exclude any material with incomplete or unclear data.
'''
system_prompt += "\n\n" + "\n\n".join(self.recent_analyses[-3:])
return system_prompt
def get_standardized_formulas(self):
"""Return a list of standardized chemical formulas."""
material_dict = {
'molybdenum disulfide': 'MoS2',
'graphene': 'C',
'quartz': 'SiO2',
'polystyrene': '(C8H8)n',
'boron nitride': 'BN',
'hexagonal boron nitride': 'BN',
'carbon nanotube': 'CNT',
'tungsten disulfide': 'WS2',
'black phosphorus': 'BP',
'silicon carbide': 'SiC',
'silicon nitride': 'Si3N4',
'titanium dioxide': 'TiO2',
'zinc oxide': 'ZnO',
'cadmium selenide': 'CdSe',
'h-BN': 'BN',
'cubic boron nitride': 'BN',
'lead sulfide': 'PbS',
'aluminum oxide': 'Al2O3',
'magnesium oxide': 'MgO'
}
composite_patterns = [
('BN/graphene', 'BNC'),
('BN/graphene/MoS2', 'BNCMoS2'),
('BN/graphene/BN', 'BNCBN'),
('TiO2/Pt/TiO2', 'TiO2PtTiO2')
]
formulas = list(material_dict.values())
formulas.extend([formula for _, formula in composite_patterns])
return formulas
def _build_keywords(self):
"""Construct keywords"""
keyword_descriptions = []
for keyword in self.keywords.get("keywords", []):
description = f'"{keyword}" (Weight: {self.keywords.get("keyword_weights", {}).get(keyword, "high")})'
synonyms = self.keywords.get("keyword_synonyms", {}).get(keyword, [])
if synonyms:
synonyms_formatted = ', '.join([f'"{syn}"' for syn in synonyms])
description += f", including synonyms like {synonyms_formatted}"
keyword_descriptions.append(description)
keywords_str = "; ".join(keyword_descriptions)
return keywords_str
def _formula_standardization(self):
"""
Returns a string containing guidelines for standardizing chemical formulas.
This can be used to help users of the system understand how to format their chemical data entries.
"""
return '''
CHEMICAL FORMULA STANDARDIZATION:
- Standardize simple compounds by removing dashes and other non-essential characters.
For example, "Al-Se" should be written as "AlSe" and "Ti-6Al-4V" should be "Ti6Al4V".
- Adjust prefixes like "h-BN" to their standard chemical formula "BN".
- Convert common material names to their respective chemical formulas for uniformity.
For example: 'Quartz' should be noted as 'SiO2', 'Graphite' as 'C', and 'Polystyrene' as '(C8H8)n'.
This ensures consistency in naming and notation across various texts.
- For composite materials such as layered or mixed compounds, concatenate the individual components without slashes or spaces.
Provide a standardized chemical formula for each component in the sequence they appear.
For example:
- "BN/graphene/BN" should be standardized to "BNCBN".
- Similarly, "TiO2/Pt/TiO2" should be written as "TiO2PtTiO2".
- Ensure all chemical formulas are presented without any spaces or special characters unless they denote a significant aspect of the chemical structure
(e.g., parentheses in polymers like (C8H8)n).
This standardization aids in maintaining consistency and clarity in reporting and analyzing chemical data. Ensure each entry adheres to these rules to improve data uniformity and readability.
'''
def _unit_conversion(self):
return f'''
\n\nUNIT CONVERSION:\n\n
(Be very strict): Convert and standardize the extracted "{self.property}" value to the unit, "{self.unit_conversion}". Record and note the "original unit" for each value to ensure accurate reporting.
(Be very strict): Verify and validate that the converted "{self.property}" value is correct. For example, you cannot be reporting that length has a unit of energy.
(Be very strict): Verify that you have recorded the "original unit" along with its extracted "{self.property}" value.
'''
def __additional_instructions(self):
"""Adds additional custom instructions if available."""
if self.additional_prompts:
return (f"\n\nEXAMPLES FOR ILLUSTRATION ONLY: Below are examples illustrating various ways "
f" '{self.property}' values and related properties might be described in texts. "
f"These examples are for understanding context and format only. "
f"Do not extract, report or include any properties from these examples in your analysis. "
f"(Be very strict): Verify and validate that you have only used this information to understand content in the present text, and not as data sources for extraction:\n"
f"{self.additional_prompts}\n\n"
)
return ""
def check_consistency(self, response_text):
blob = TextBlob(response_text)
properties = {}
for sentence in blob.sentences:
sentiment = sentence.sentiment.polarity
            for keyword in self.keywords.get("keywords", []):
if keyword in sentence.lower():
if keyword in properties:
if (properties[keyword] > 0 and sentiment < 0) or (properties[keyword] < 0 and sentiment > 0):
return False
properties[keyword] = sentiment
return True
def check_relevance(self, response_text):
doc = nlp(response_text)
        relevant_terms = set(['material', self.property] + list(self.keywords.get("keywords", [])))
found_terms = set()
for token in doc:
if token.lemma_ in relevant_terms:
found_terms.add(token.lemma_)
return len(found_terms) >= len(relevant_terms) / 2
def get_element_data(self, symbol):
"""Fetch element data dynamically and cache it."""
if symbol not in self.elements:
try:
self.elements[symbol] = get_element(symbol)
except ValueError:
print(f"Error: Element {symbol} not found.")
return None
return self.elements[symbol]
def validate_chemical_formula(self, text):
pattern = r'\b([A-Z][a-z]{0,2}(\d{0,3})?)+\b|\(\b([A-Z][a-z]{0,2}(\d{0,3})?)+\b\)(\d+)'
matches = re.finditer(pattern, text)
valid_formulas = []
def parse_formula(formula):
content = {}
element_pattern = r'([A-Z][a-z]?)(\d*)'
multiplier = 1
if '(' in formula and ')' in formula:
match = re.match(r'\(([^)]+)\)(\d+)', formula)
if match:
formula = match.group(1)
multiplier = int(match.group(2))
for element_match in re.finditer(element_pattern, formula):
element_symbol, count = element_match.groups()
count = int(count) if count else 1
count *= multiplier
element_data = self.get_element_data(element_symbol)
if element_data is None:
#print(f"Element {element_symbol} not recognized.")
return False
if element_symbol in content:
content[element_symbol] += count
else:
content[element_symbol] = count
return True
for match in matches:
formula = match.group(0)
if parse_formula(formula):
valid_formulas.append(formula)
return valid_formulas
def preprocess_abstract(self, abstract, diagnostics=False):
if diagnostics:
print("Original Abstract:")
print(abstract)
clean_abstract = re.sub(r'\s{2,}', ';', abstract) # Replace multiple spaces with semicolon
clean_abstract = re.sub(r'\(\?\)', '', clean_abstract) # Remove uncertainty notations
# Remove HTML/XML tags
no_tags = re.sub(r'<[^>]+>', '', clean_abstract)
if diagnostics:
print("\nAfter Removing Tags:")
print(no_tags)
protected_formulas = re.sub(r'(\b[A-Za-z0-9]+(?:\/[A-Za-z0-9]+)+\b)', lambda x: x.group(0).replace('/', '∕'), no_tags)
# Split the cleaned text into sentences
sentences = sent_tokenize(protected_formulas)
if diagnostics:
print("\nAfter Sentence Tokenization:")
for i, sentence in enumerate(sentences):
print(f"Sentence {i+1}: {sentence}")
# Join sentences to form a continuous block of text
continuous_text = ' '.join(sentences)
# Read keywords from the file specified by self.prep_keyword_path
try:
with open(self.prep_keyword_path, "r") as f:
try:
keywords = json.loads(f.read())
except json.JSONDecodeError:
f.seek(0)
keywords = [line.strip() for line in f.readlines()]
# Create a pattern for the keywords
pattern = '|'.join(keywords)
# Filter the continuous text using the keywords
if re.search(pattern, continuous_text, re.IGNORECASE):
return continuous_text
else:
return None
except FileNotFoundError:
print(f"Error: '{self.prep_keyword_path}' file not found.")
return None
def build_history(self):
if not self.recent_analyses or len(self.recent_analyses) < 3:
return []
property_description = getattr(self, 'property', 'specific properties')
history = [
{
'role': 'user',
'parts': [f'Use these as examples on how to extract the material chemical formulas and {property_description} from the text accurately:']
},
{
'role': 'model',
# Construct the model part using the last three entries from recent_analyses
'parts': [', '.join(self.recent_analyses[-3:])]
}
]
return history
def retry_with_backoff(self, func, max_retries=5, backoff_in_seconds=1):
"""Retry a function with exponential backoff."""
attempt = 0
while attempt < max_retries:
try:
return func()
except ReadTimeoutError as e:
logging.warning(f"Timeout occurred, waiting {backoff_in_seconds * (2 ** attempt)}s before retrying...")
time.sleep(backoff_in_seconds * (2 ** attempt))
attempt += 1
except requests.exceptions.RequestException as e:
logging.error(f"An error occurred: {e}")
if attempt < max_retries - 1:
sleep_time = backoff_in_seconds * (2 ** attempt) + random.uniform(0, 1)
logging.info(f"Non-timeout error, retrying in {sleep_time:.2f} seconds.")
time.sleep(sleep_time)
attempt += 1
else:
logging.error("Max retries reached or critical error occurred.")
break
except Exception as e:
wait = backoff_in_seconds * (2 ** attempt) + random.uniform(0, 1)
logging.error(f"Error: {e}. Retrying in {wait:.2f} seconds...")
time.sleep(wait)
attempt += 1
raise Exception(f"Failed after {max_retries} retries.")
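    # the backoff decorator below adds one more safety net: the whole call is
    # retried with exponential backoff whenever a ReadTimeoutError escapes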
@backoff.on_exception(backoff.expo, ReadTimeoutError)
def analyze_abstract(self, abstract, max_retries=5, backoff_factor=1.5, timeout_wait=121, max_length=int(2021)):
        abstract = self.preprocess_abstract(abstract)
        # preprocess_abstract() returns None when the text matches none of the
        # pre-filter keywords; bail out instead of crashing on re.split(None)
        if not abstract:
            return ""
        current_part = ""
parts = []
history = self.build_history()
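        # split the abstract into sentence-aligned chunks of at most max_length
        # characters so each model request stays within a manageable size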
sentences = re.split(r'(?<=[.!?]) +', abstract)
for sentence in sentences:
if len(current_part) + len(sentence) + 1 > max_length:
parts.append(current_part)
current_part = sentence
else:
current_part += (' ' + sentence if current_part else sentence)
if current_part:
parts.append(current_part)
combined_result = ''
def make_request(part):
dynamic_prompt = f"{self._build_prompt()}\n\nNext text:\n{part}"
response = None
if self.model_type == 'gemini':
chat = self.model.start_chat(history=history)
response = chat.send_message(dynamic_prompt, generation_config=self.generation_config)
if hasattr(response, 'parts'):
analysis_result = ''.join(part.text for part in response.parts if hasattr(part, 'text'))
else:
raise ValueError("Unexpected API response format")
elif self.model_type == 'chatgpt':
response = self.model.chat.completions.create(
model=self.model_name,
messages=[
{"role": "system", "content": dynamic_prompt},
{"role": "user", "content": part}
],
temperature=self.temperature,
max_tokens=self.max_output_tokens,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
analysis_result = response.choices[0].message.content if response.choices else "Analysis failed."
else:
raise ValueError("Unsupported model type provided. Use 'gemini' or 'gpt-4'.")
return analysis_result
for part in parts:
try:
analysis_result = self.retry_with_backoff(lambda: make_request(part), max_retries=max_retries, backoff_in_seconds=backoff_factor)
if self.check_consistency(analysis_result): # and self.validate_chemical_formula(analysis_result): #and self.check_relevance(analysis_result): #
combined_result += analysis_result.strip() + " "
else:
logging.warning("Formula verification failed.")
break
except Exception as e:
logging.error(f"Error during analysis: {e}")
continue # Move to the next part if one fails
if combined_result:
self.recent_analyses.append(combined_result.strip())
return combined_result.strip()
def parse_labelled(self, line):
pattern1 = re.compile(
rf"material:\s*([\w\s]+?),\s*\"?{self.property}\"?\s*\(Converted\):?\s*\"?(\d+\.?\d*|None)\"?\s*\(Original Unit: (\w+|None)\),\s*Method:\s*(.*)",
re.IGNORECASE
)
# Second pattern: Handles cases where the entire property descriptor is strictly quoted
pattern2 = re.compile(
rf"material:\s*([\w\s]+?),\s*\"{self.property} \(Converted\): (\d+\.?\d*|None) \(Original Unit: (\w+|None)\)\"\s*,\s*Method:\s*(.*)",
re.IGNORECASE
)
# Try to match using the first pattern
try:
match = pattern1.search(line)
if match:
material, property_value, original_unit, method = match.groups()
material = material.strip() if material else None
property_value = None if property_value == "None" else property_value
original_unit = None if original_unit == "None" else original_unit.strip()
method = method.strip() if method else "None"
return material, property_value, original_unit, method
except ValueError:
pass # If no match, pass to the second pattern
# Try to match using the second pattern
match = pattern2.search(line)
if match:
material, property_value, original_unit, method = match.groups()
material = material.strip() if material else None
property_value = None if property_value == "None" else property_value
original_unit = None if original_unit == "None" else original_unit.strip()
method = method.strip() if method else "None"
return material, property_value, original_unit, method
else:
raise ValueError("Labelled format not matched")
def parse_delimited(self, line):
if not line.strip() or re.match(r"^\|\s*(-+|\s+)\s*\|", line.strip()):
return None
if re.match(r"\|\s*[\w\s]+?\s*\|\s*[^\d\.]+\s*\|\s*[\w\s]+?\s*\|", line):
return None # Ignore header lines
pattern = re.compile(rf"\|\s*([\w\s]+?)\s*\|\s*(\d+\.?\d*)\s*\|\s*([\w\s]+?)\s*\|", re.IGNORECASE)
match = pattern.search(line)
if match:
return match.groups() + (None,)
def parse_simple(self, line):
"""
Parse lines that directly give the property value without conversion or original unit details.
Example input: "Material: InSeI, Band gap: 2.12, Method: None"
"""
# Regular expression to match the format without conversion details
pattern = re.compile(
rf"material:\s*([\w\s]+?),\s*{self.property}:\s*(\d+\.?\d*),\s*Method:\s*(.*)",
re.IGNORECASE
)
match = pattern.search(line)
if match:
material, property_value, method = match.groups()
default_unit = "unknown" # You might have a default or you may leave it as None
method = method if method else "None" # Default to "None" if method is not specified
return material, property_value, default_unit, method
else:
raise ValueError("Simple format not matched")
def parse_new_format(self, line):
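        # note: these two patterns are hard-coded for bulleted "Thickness" entries
        # reported in Å (ranges such as "10 Å to 13 Å" and single values)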
pattern1 = re.compile(
r"\*\*Material\*\*:\s*([\w\s]+)\n\s*-\s*\*\*Thickness \(Converted\)\*\*:\s*([\d\.]+\s*Å\s*to\s*[\d\.]+\s*Å)\s*\(Original Unit:\s*(\w+)\)\n\s*-\s*\*\*Method\*\*:\s*(.*)",
re.IGNORECASE
)
# Second pattern: Handles single values (e.g., 13 Å)
pattern2 = re.compile(
r"\*\*Material\*\*:\s*([\w\s]+)\n\s*-\s*\*\*Thickness \(Converted\)\*\*:\s*([\d\.]+\s*Å)\s*\(Original Unit:\s*(\w+)\)\n\s*-\s*\*\*Method\*\*:\s*(.*)",
re.IGNORECASE
)
try:
match = pattern1.search(line)
if match:
material, thickness, original_unit, method = match.groups()
material = material.strip() if material else None
thickness = thickness.strip() if thickness else None
original_unit = original_unit.strip() if original_unit else None
method = method.strip() if method else "None"
return material, thickness, original_unit, method
except ValueError:
pass
match = pattern2.search(line)
if match:
material, thickness, original_unit, method = match.groups()
material = material.strip() if material else None
thickness = thickness.strip() if thickness else None
original_unit = original_unit.strip() if original_unit else None
method = method.strip() if method else "None"
return material, thickness, original_unit, method
else:
raise ValueError("New format not matched")
def parse_analysis_results(self, analysis_result):
valid_entries = []
lines = analysis_result.split('\n')
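        # try each parser in turn for every line: labelled entries, pipe-delimited
        # table rows, the simple "Material: X, property: v" form, then the bulleted format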
for line in lines:
result = None
try:
result = self.parse_labelled(line)
except ValueError:
pass
if not result:
try:
result = self.parse_delimited(line)
except ValueError:
pass
if not result:
try:
result = self.parse_simple(line)
except ValueError:
pass
if not result:
try:
result = self.parse_new_format(line)
except ValueError:
continue
if result:
material, property_value, original_unit, method = result
if property_value is not None:
property_value = property_value.strip()
if original_unit is not None:
original_unit = original_unit.strip()
if method is not None:
method = method.strip()
if material and material.strip() and property_value and property_value.strip():
valid_entries.append((material, property_value, original_unit, method))
return valid_entries
def read_file_with_detected_encoding(self, filepath, sample_size=4096):
"""Read a file using detected encoding, with fallback and error handling."""
try:
with open(filepath, 'rb') as file:
raw_data = file.read(sample_size)
result = detect(raw_data)
encoding = result['encoding']
confidence = result['confidence']
logging.info(f"Detected encoding {encoding} with confidence {confidence}")
if encoding and confidence > 0.5:
try:
return pd.read_csv(filepath, encoding=encoding)
except UnicodeDecodeError:
#logging.error(f"Failed to decode file {filepath} with detected encoding {encoding}. Trying UTF-8.")
return pd.read_csv(filepath, encoding='utf-8') # Fallback to UTF-8
else:
#logging.info(f"Low confidence in detected encoding. Trying UTF-8 as fallback.")
return pd.read_csv(filepath, encoding='utf-8')
except Exception as e:
#logging.error(f"Unhandled error while reading file {filepath}: {e}")
raise ValueError(f"Failed to read the file {filepath} with any known encoding.") from e
def process_and_save_text(self, input_csv, text_column, output_csv_prefix): #='materialppt'):
file_extension = os.path.splitext(input_csv)[-1].lower()
#timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
output_csv = output_csv_prefix #f"{output_csv_prefix}_{timestamp}.csv"
if os.path.exists(output_csv):
os.remove(output_csv)
logging.info(f"Existing file {output_csv} removed.")
try:
if file_extension == '.csv':
examples = self.read_file_with_detected_encoding(input_csv) #pd.read_csv(input_csv) #
elif file_extension in ['.xls', '.xlsx']:
examples = pd.read_excel(input_csv)
else:
message = "Unsupported file format"
logging.error(message)
print(message)
return
except Exception as e:
message = f"Error reading the file: {e}"
print(message)
logging.error(message)
return
examples.dropna(subset=[text_column], how='all', inplace=True)
file_exists = os.path.isfile(output_csv)
for index, row in examples.iterrows():
try:
abstract = row[text_column]
logging.info(f"Analyzing text {index + 1}/{len(examples)}")
analysis_result = self.analyze_abstract(abstract)
#print("analysis_result", analysis_result)
parsed_results = self.parse_analysis_results(analysis_result)
#print("parsed_results ", parsed_results)
#if not parsed_results:
# logging.info(f"No valid data found for abstract {index + 1}. Skipping...")
# continue
with open(output_csv, 'a', newline='', encoding='utf-8') as f:
writer = csv.writer(f)
if not file_exists:
timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
writer.writerow([f"Timestamp: {timestamp}"])
writer.writerow(["MaterialName", f"{self.property.capitalize()}({self.unit_conversion})", "OriginalUnit", "Method"])
file_exists = True
for material, property_value, unit, method in parsed_results:
#if material and property_value:
if material is not None and material.strip() and property_value is not None and property_value.strip():
writer.writerow([material, property_value, unit, method])
logging.info(f"Material: {material}, {self.property}: {property_value} (Unit: {unit})")
else:
logging.warning(f"Incomplete data for abstract {index + 1}. Skipping entry.")
except Exception as e:
logging.error(f"An error occurred processing abstract {index + 1}: {e}")
| 38,409 | Python | .py | 654 | 45.466361 | 336 | 0.598107 | gmp007/PropertyExtractor | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,907 | writeout.py | gmp007_PropertyExtractor/src/writeout.py | """
PropertyExtractor -- LLM-based model to extract material property from unstructured dataset
This program is free software; you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation
version 3 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
Email: [email protected]
"""
import os
import json
from datetime import datetime
def write_default_inputs(cwd):
"""
Writes a default 'extract.in' file in the specified directory if it does not exist.
Parameters:
cwd (str): The current working directory where the file should be written.
"""
file_path = os.path.join(cwd, "extract.in")
if not os.path.exists(file_path):
extract_input = """
###############################################################################
### The input file to control the calculation details of PropertyExtract ###
###############################################################################
# Type of LLM model: gemini/chatgpt
model_type = gemini
# LLM model name: gemini-pro/gpt-4
model_name = gemini-pro
# Property to extract from texts
property = thickness
# Harmonized unit for the property to be extracted
property_unit = Angstrom
# temperature to max_output_tokens are LLM model parameters
temperature = 0.0
top_p = 0.95
max_output_tokens = 80
# You can supply additional keywords to be used in conjunction with the property: modify the file keywords.json
use_keywords = True
# You can add additional custom prompts: modify the file additionalprompt.txt
additional_prompts = additionalprompt.txt
# Name of input file to be processed: csv/excel format
inputfile_name = inputfile.csv
# Column name in the input file to be processed
column_name = Text
# Name of output file
outputfile_name = outputfile.csv
"""
try:
with open(file_path, "w") as file:
file.write(extract_input.strip()) # Using .strip() to clean up leading/trailing whitespace
print(f"'extract.in' created successfully in {cwd}.")
except IOError as e:
print(f"Failed to write to the file: {e}")
else:
print(f"'extract.in' already exists in {cwd}. No action was taken.")
def write_prep_keyword_prompts(cwd):
"""
Writes a default prep_keyword.txt file in the specified directory if it does not exist.
Parameters:
cwd (str): The current working directory where the file should be written.
"""
file_path = os.path.join(cwd, "prep_keyword.txt")
if not os.path.exists(file_path):
# Define the content to be written to the file
pre_key = """["bandgap", "band gap"]\n"""
try:
with open(file_path, "w") as file:
file.write(pre_key)
print(f"'prep_keyword.txt' created successfully in {cwd}.")
except IOError as e:
print(f"Failed to write to the file: {e}")
else:
print(f"'prep_keyword.txt' already exists in {cwd}. No action was taken.")
def write_additionalprompts(cwd):
"""
Writes a default additionalprompt.txt file in the specified directory if it does not exist.
Parameters:
cwd (str): The current working directory where the file should be written.
"""
file_path = os.path.join(cwd, "additionalprompt.txt")
if not os.path.exists(file_path):
# Define the content to be written to the file
additional_prompt_text = """ - Titanium dioxide films reported at 1 µm thickness: Report as "Material: TiO2, Thickness: 10".
        - Text mentions 2D-based h-AB monolayer with a thickness of 0.34 nm obtained using AFM: Report as "Material: AB, Thickness: 3.4, Unit: nm, Method: AFM".
- Text mentions the thickness of material "ABC3" is 60 Å from our experimental data analysis: Report as "Material: ABC3, Thickness: 60, Unit: Å, Method: Experiment ".
"""
try:
with open(file_path, "w") as file:
file.write(additional_prompt_text)
print(f"'additionalprompt.txt' created successfully in {cwd}.")
except IOError as e:
print(f"Failed to write to the file: {e}")
else:
print(f"'additionalprompt.txt' already exists in {cwd}. No action was taken.")
def write_default_keywords(cwd):
"""
Writes a default keywords.json file in the specified directory if it does not exist.
Parameters:
cwd (str): The current working directory where the file should be written.
"""
# Define the path to the file
file_path = os.path.join(cwd, "keywords.json")
# Check if the file already exists
if not os.path.exists(file_path):
# Define the JSON data to be written to the file
keywords_data = {
"keywords": ["2D materials", "ultrathin materials", "van der Waals materials"],
"keyword_weights": {
"2D materials": "high",
"ultrathin materials": "medium",
"van der Waals materials": "high"
},
"keyword_synonyms": {
"2D materials": ["two-dimensional materials"],
"van der Waals materials": ["vdW materials"]
}
}
# Write the JSON data to the file
try:
with open(file_path, "w") as file:
json.dump(keywords_data, file, indent=4)
print(f"'keywords.json' created successfully in {cwd}.")
except IOError as e:
print(f"Failed to write to the file: {e}")
else:
print(f"'keywords.json' already exists in {cwd}. No action was taken.")
def print_default_input_message_0():
print("╔════════════════════════════════════════════════════════════════════════════════╗")
print("║ ║")
print("║ ♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥ ║")
print("║ ♥♥♥ Default extract.in input template generated. ♥♥♥ ║")
print("║ ♥♥ Modify and rerun thick2d -0 to generate other ♥♥ ║")
print("║ ♥♥ important input files. Happy running :) ♥♥ ║")
print("║ ♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥ ║")
print("║ Exiting... ║")
print("║ ║")
print("╚════════════════════════════════════════════════════════════════════════════════╝")
def print_default_input_message():
print("╔══════════════════════════════════════════════════════════════════════════╗")
print("║ ║")
print("║ ♥♥ ♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥ ♥♥ ║")
print("║ ♥♥♥ ♥♥♥ ║")
print("║ ♥ All default inputs written to files. ♥ ║")
print("║ ♥ Modify according LLM model/type ♥ ║")
print("║ ♥ Run code with propextract ♥ ║")
print("║ ♥ Happy running :) ♥ ║")
print("║ ♥♥♥ ♥♥♥ ║")
print("║ ♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥♥ ║")
print("║ Exiting... ║")
print("║ ║")
print("╚══════════════════════════════════════════════════════════════════════════╝")
max_width = len("|WARNING: This is an empirical approx; validity needs to be checked !! |")
def print_line(ec_file,content, padding=1, border_char="|", filler_char=" "):
content_width = int(max_width) - (2 * int(padding)) - 2 # Subtract 2 for the border characters
content = content[:content_width] # Ensure content doesn't exceed the width
line = border_char + filler_char*padding + content.ljust(content_width) + filler_char*padding + border_char
#print(line) # Print the line to the console
if ec_file:
ec_file.write(line + "\n")
else:
print(line)
def print_banner(version,code_type,model_type,ec_file=None):
current_time = datetime.now().strftime('%H:%M:%S')
current_date = datetime.now().strftime('%Y-%m-%d')
conclusion_msg = f"Calculations started at {current_time} on {current_date}"
message = f"General languagge model simulations using \nPropertyExtract Version: {version}\n with {code_type} conversational LLM {model_type} model \nto perform simulations\n{conclusion_msg}"
max_width = 80 # Define the maximum width for the banner
print_line(ec_file,'❤' * (max_width - 2), padding=0, border_char='❤', filler_char='❤')
for line in message.split('\n'):
centered_line = line.center(max_width - 4)
print_line(ec_file,centered_line, padding=1, border_char='❤')
print_line(ec_file,'❤' * (max_width - 2), padding=0, border_char='❤', filler_char='❤')
def print_boxed_message(ec_file=None):
header_footer = "+" + "-" * 78 + "+"
spacer = "| " + " " * 76 + " |"
# List of lines to be printed
lines = [
(" * CITATIONS *", True),
("If you have used PropertyExtractor in your research, PLEASE cite:", False),
("", False), # Space after the above line
("PropertyExtractor: ", False),
("Dynamic in-context learning with conversational language", False),
("models for data extraction and materials property prediction ", False),
("C.E. Ekuma, ", False),
("XXX xxx, xxx, (2024)", False),
("", False),
("", False), # Blank line for separation
("PropertyExtractor: ", False),
("A conversational large language model for extracting ", False),
("physical properties from scientific corpus, C.E. Ekuma,", False),
("www.github.com/gmp007/propertyextractor", False)
]
def output_line(line):
if ec_file:
ec_file.write(line + "\n")
else:
print(line)
output_line(header_footer)
for line, underline in lines:
centered_line = line.center(76)
output_line("| " + centered_line + " |")
if underline:
underline_str = "-" * len(centered_line)
output_line("| " + underline_str.center(76) + " |")
# Print footer of the box
output_line(header_footer)
| 12,033 | Python | .py | 207 | 46.101449 | 195 | 0.547842 | gmp007/PropertyExtractor | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,908 | settings.py | N4GR_Fortnite-Lego-AFK-XP/settings.py | import json
class settings:
class bindings:
def __init__(self) -> None:
with open("bindings.json") as f:
self.bindings = json.load(f)
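            # Expected bindings.json layout (key names inferred from setBindings below and character.py;
            # the example key values are placeholders only):
            #   {"forward": "w", "backward": "s", "left": "a", "right": "d", "jump": "space"}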
def getBindings(self) -> dict[str]:
return self.bindings
def setBindings(self, type: str, binding: str):
new_bindings = {
"forward": binding if type == "forward" else self.bindings["forward"],
"backward": binding if type == "backward" else self.bindings["backward"],
"left": binding if type == "left" else self.bindings["left"],
"right": binding if type == "right" else self.bindings["right"]
}
with open("bindings.json", "w") as f:
json.dump(new_bindings, f, indent = 4)
class options:
def __init__(self) -> None:
with open("options.json") as f:
self.options = json.load(f)
def getOptions(self) -> dict[list[int]]:
return self.options | 1,047 | Python | .py | 23 | 32.478261 | 89 | 0.541877 | N4GR/Fortnite-Lego-AFK-XP | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,909 | versioncheck.py | N4GR_Fortnite-Lego-AFK-XP/versioncheck.py | import json
import requests
from log import log
from time import sleep
class get:
def __init__(self) -> None:
with open("version.json") as f:
self.options = json.load(f)
def creatorname(self) -> str:
"""
        Retrieves the creator's name.
Returns:
str: Example of creatorname: "N4GR"
"""
return self.options["creator"]
def creatorlink(self) -> str:
"""
        Retrieves the creator's link.
Returns:
str: Example of creatorlink: "https://github.com/N4GR"
"""
return self.options["creator_link"]
def version(self) -> int:
"""
Retrieves the current version.
Returns:
int: Example of version: 2
"""
return self.options["version"]
def reponame(self) -> str:
"""
Retrieves the repository name.
Returns:
str: Example of reponame: "Fortnite-Lego-AFK-XP"
"""
return self.options["repo_name"]
class versioning:
def __init__(self) -> None:
        # Call checkConnection() once and unpack it, instead of making two separate requests.
        self.connection, self.response = self.checkConnection()
def checkConnection(self) -> tuple[bool | dict]:
"""
Checks if the user is connected to the internet.
Returns:
            tuple(bool, dict | None): (True, response) if the connection succeeded, (False, None) otherwise.
"""
try:
response = requests.get(f"https://api.github.com/repos/{get().creatorname()}/{get().reponame()}/releases/latest", timeout = 5)
return True, response
        except requests.ConnectionError:
            return False, None
def latestVersion(self) -> str:
"""
Retrieves the latest version from github.
Returns:
str: Example of latestversion: "v2"
"""
return self.response.json()["tag_name"]
def version_check(self) -> bool:
"""
Checks if the latest version from github is the same as the currently installed version.
Returns:
            tuple(bool, str): (True, "Complete") if the versions match, otherwise (False, reason) when the connection failed or the versions differ.
"""
if self.connection is False:
return False, "connection-fail"
if get().version() != self.latestVersion():
return False, "different-version"
else:
return True, "Complete"
if __name__ == "__main__":
check = versioning()
version_check = check.version_check()
successful_connection = version_check[0]
connection_note = version_check[1]
if successful_connection is False and connection_note == "different-version":
print(log().error(f"NOT RUNNING THE LATEST VERSION\n"))
print(log().error(f"Your version: v{get().version()}"))
print(log().error(f"latest version: {check.latestVersion()}\n"))
print(log().error(f"The newest version can be found here: {get().creatorlink()}/{get().reponame()}/releases/latest"))
input(log().error("Press enter to continue anyway..."))
else:
print(log().success(f"Passed version check"))
sleep(2) | 3,198 | Python | .py | 84 | 29.22619 | 138 | 0.602869 | N4GR/Fortnite-Lego-AFK-XP | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,910 | log.py | N4GR_Fortnite-Lego-AFK-XP/log.py | from datetime import datetime
class log:
def __init__(self) -> None:
now = datetime.now()
self.current_date = now.strftime("%d-%m-%Y %H:%M:%S")
self.red = "\033[31m"
self.green = "\033[32m"
self.yellow = "\033[33m"
self.blue = "\033[34m"
self.end = "\033[0m"
def error(self, message: str) -> str:
return f"{self.yellow} {self.current_date} {self.end}|{self.red} ERROR {self.end}| {message}"
def note(self, message: str) -> str:
return f"{self.yellow} {self.current_date} {self.end}|{self.blue} NOTE {self.end}| {message}"
def success(self, message: str) -> str:
return f"{self.yellow} {self.current_date} {self.end}|{self.green} SUCCESS {self.end}| {message}" | 761 | Python | .py | 16 | 40.25 | 105 | 0.594595 | N4GR/Fortnite-Lego-AFK-XP | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,911 | main.py | N4GR_Fortnite-Lego-AFK-XP/main.py | from character import character
from random import randint, choice
from time import sleep
from log import log
class main:
def __init__(self) -> None:
self.player = character()
def movement(self) -> list[object]:
movements = [self.player.forward, self.player.backward, self.player.left, self.player.right]
return movements
def randomAction(self) -> tuple[int | list[object]]:
illegal_actions = [
["left", "right"],
["right", "left"],
["forward", "backward"],
["backward", "forward"],
["left", "left"],
["right", "right"],
["forward", "forward"],
["backward", "backward"]
]
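        # Pairs that would cancel each other out or double-press the same key; rolls matching these are re-drawn below.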
action_time = randint(1, 10)
while True:
actions = []
for x in range(randint(1, 2)):
actions.append(choice(self.movement()))
stuff = []
for action in actions:
stuff.append(action.__name__)
if stuff in illegal_actions:
continue
else:
break
print(log().note(f"Randomly chosen the aciton/s: {(stuff[0], stuff[1]) if len(stuff) == 2 else stuff[0]}..."))
return action_time, actions
def start(self) -> None:
char = character()
times_ran = 1
while True:
print(log().note(f"Ran {times_ran} time/s..."))
complete_actions = []
action_time, actions = self.randomAction()
for action in actions:
print(log().note(f"Pressing the action: {action.__name__}..."))
action()
complete_actions.append(action)
print(log().note(f"Waiting {action_time}s until next action..."))
for x in range(action_time):
roll = randint(1, 20)
if roll == 1:
char.jump()
elif roll == 2:
char.punch()
sleep(1)
print(log().note(f"Waiting {x + 1}/{action_time}..."))
for action in complete_actions:
self.player.stop(action.__name__)
print(log().note(f"Stopping {action.__name__}..."))
print(log().success(f"Successfully completed actions, going again...\n"))
times_ran += 1
if __name__ == "__main__":
start_time = 5
print(log().note(f"Program will begin in {start_time}s..."))
for x in range(start_time):
sleep(1)
print(log().note(f"{x + 1}/{start_time}s..."))
print(log().note(f"Beginning now...\n"))
main().start() | 2,686 | Python | .py | 68 | 27.632353 | 118 | 0.512641 | N4GR/Fortnite-Lego-AFK-XP | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,912 | character.py | N4GR_Fortnite-Lego-AFK-XP/character.py | import pyautogui
from settings import settings
class character:
def __init__(self) -> None:
self.bindings = settings.bindings().getBindings()
def forward(self) -> bool:
try:
pyautogui.keyDown(self.bindings["forward"])
return True
except pyautogui.PyAutoGUIException:
return False
def backward(self) -> bool:
try:
pyautogui.keyDown(self.bindings["backward"])
return True
except pyautogui.PyAutoGUIException:
return False
def left(self) -> bool:
try:
pyautogui.keyDown(self.bindings["left"])
return True
except pyautogui.PyAutoGUIException:
return False
def right(self) -> bool:
try:
pyautogui.keyDown(self.bindings["right"])
return True
except pyautogui.PyAutoGUIException:
return False
def jump(self) -> bool:
try:
pyautogui.press(self.bindings["jump"])
return True
except pyautogui.PyAutoGUIException:
return False
def punch(self) -> bool:
try:
pyautogui.leftClick()
return True
except pyautogui.PyAutoGUIException:
return False
def stop(self, movement: str) -> bool:
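        # e.g. stop("forward") looks up character.forward and releases self.bindings["forward"].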
try:
method = getattr(character, movement)
binding = self.bindings[method.__name__]
pyautogui.keyUp(binding)
return True
except pyautogui.PyAutoGUIException:
return False | 1,615 | Python | .py | 49 | 22.265306 | 57 | 0.59052 | N4GR/Fortnite-Lego-AFK-XP | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,913 | demo_v3.py | shilapi_DGLAB-python-driver/demo_v3.py | # Description: This is a demo script to show how to use the pydglab library to interact with the DGLab device.
import asyncio
import logging
import pydglab
from pydglab import model_v3
logging.basicConfig(
format="%(module)s [%(levelname)s]: %(message)s", level=logging.DEBUG
)
async def _():
await pydglab.scan()
dglab_instance = pydglab.dglab_v3()
try:
await dglab_instance.create()
except TimeoutError:
logging.error("Timeout, retrying...")
await dglab_instance.create()
await dglab_instance.set_coefficient(100, 100, 100, model_v3.ChannelA)
await dglab_instance.set_coefficient(100, 100, 100, model_v3.ChannelB)
await dglab_instance.get_strength()
await dglab_instance.set_strength_sync(1, 1)
await dglab_instance.set_wave_sync(0, 0, 0, 0, 0, 0)
await dglab_instance.set_wave_set(
model_v3.Wave_set["Going_Faster"], model_v3.ChannelA
)
await dglab_instance.get_strength()
await asyncio.sleep(2)
await dglab_instance.close()
asyncio.run(_())
| 1,044 | Python | .py | 28 | 32.75 | 110 | 0.71556 | shilapi/DGLAB-python-driver | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,914 | demo_v2.py | shilapi_DGLAB-python-driver/demo_v2.py | # Description: This is a demo script to show how to use the pydglab library to interact with the DGLab device.
import asyncio
import logging
import pydglab
from pydglab import model_v2
logging.basicConfig(
format="%(module)s [%(levelname)s]: %(message)s", level=logging.INFO
)
async def _():
await pydglab.scan()
dglab_instance = pydglab.dglab()
try:
await dglab_instance.create()
except TimeoutError:
logging.error("Timeout, retrying...")
await dglab_instance.create()
await dglab_instance.get_strength()
await dglab_instance.set_strength_sync(1, 1)
await dglab_instance.set_wave_sync(0, 0, 0, 0, 0, 0)
await dglab_instance.set_wave_set(model_v2.Wave_set["Going_Faster"], model_v2.ChannelA)
await dglab_instance.get_batterylevel()
await dglab_instance.get_strength()
await asyncio.sleep(2)
await dglab_instance.close()
asyncio.run(_()) | 919 | Python | .py | 25 | 32.48 | 110 | 0.720721 | shilapi/DGLAB-python-driver | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,915 | bthandler_v2.py | shilapi_DGLAB-python-driver/pydglab/bthandler_v2.py | import logging
from bleak import BleakClient, BleakScanner
from typing import Tuple, List
from bitstring import BitArray
from pydglab.model_v2 import *
from pydglab.uuid import *
logger = logging.getLogger(__name__)
async def scan():
"""
Scan for DGLAB v2.0 devices and return a list of tuples with the address and the RSSI of the devices found.
Returns:
List[Tuple[str, int]]: (address, RSSI)
"""
devices = await BleakScanner().discover(return_adv=True)
dglab_v2: List[Tuple[str, int]] = []
for i, j in devices.values():
if j.local_name == CoyoteV2.name and i.address is not None:
logger.info(f"Found DGLAB v2.0 {i.address}")
dglab_v2.append((i.address, j.rssi))
if not dglab_v2:
logger.error("No DGLAB v2.0 found")
return dglab_v2
async def scan_():
dglab_v2 = await scan()
if not dglab_v2:
raise Exception("No DGLAB v2.0 found")
if len(dglab_v2) > 1:
logger.warning("Multiple DGLAB v2.0 found, chosing the closest one")
elif len(dglab_v2) == 0:
raise Exception("No DGLAB v2.0 found")
return sorted(dglab_v2, key=lambda device: device[1])[0][0]
async def get_batterylevel_(client: BleakClient, characteristics: CoyoteV2):
r = await client.read_gatt_char(characteristics.characteristicBattery)
return r
async def get_strength_(client: BleakClient, characteristics: CoyoteV2):
r = await client.read_gatt_char(characteristics.characteristicEStimPower)
# logger.debug(f"Received strenth bytes: {r.hex()} , which is {r}")
r.reverse()
r = BitArray(r).bin
# logger.debug(f"Received strenth bytes after decoding: {r}")
return int(int(r[-22:-11], 2) / 2047 * 200), int(int(r[-11:], 2) / 2047 * 200)
async def set_strength_(
client: BleakClient, value: Coyote, characteristics: CoyoteV2
):
    # Create a byte array with the strength values.
    # The public API uses a 0-200 range while the device expects 0-2047,
    # so the values are scaled by 2047/200 before being packed.
    # Validate first so a missing or out-of-range value falls back to 0.
    if (
        value.ChannelA.strength is None
        or value.ChannelA.strength < 0
        or value.ChannelA.strength > 200
    ):
        value.ChannelA.strength = 0
    if (
        value.ChannelB.strength is None
        or value.ChannelB.strength < 0
        or value.ChannelB.strength > 200
    ):
        value.ChannelB.strength = 0
    strengthA = int(int(value.ChannelA.strength) * 2047 / 200)
    strengthB = int(int(value.ChannelB.strength) * 2047 / 200)
array = ((strengthA << 11) + strengthB).to_bytes(3, byteorder="little")
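    # Worked example (values assumed): strength 100 on both channels scales to 1023 each,
    # so (1023 << 11) + 1023 = 2096127 = 0x1FFBFF, sent little-endian as ff fb 1f.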
# logger.debug(f"Sending bytes: {array.hex()} , which is {array}")
r = await client.write_gatt_char(
characteristics.characteristicEStimPower, bytearray(array), response=False
)
return value.ChannelA.strength, value.ChannelB.strength
async def set_wave_(
client: BleakClient,
value: ChannelA | ChannelB,
characteristics: CoyoteV2,
):
# Create a byte array with the wave values.
array = ((value.waveZ << 15) + (value.waveY << 5) + value.waveX).to_bytes(
3, byteorder="little"
)
# logger.debug(f"Sending bytes: {array.hex()} , which is {array}")
r = await client.write_gatt_char(
(
characteristics.characteristicEStimA
if type(value) is ChannelA
else characteristics.characteristicEStimB
),
bytearray(array),
response=False,
)
return value.waveX, value.waveY, value.waveZ
| 3,556 | Python | .py | 87 | 34.954023 | 111 | 0.674688 | shilapi/DGLAB-python-driver | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,916 | uuid.py | shilapi_DGLAB-python-driver/pydglab/uuid.py | # This file contains the UUIDs for the D-LAB ESTIM01 device
from typing import Union
from bleak import BleakGATTCharacteristic
class CoyoteV2(object):
name: str = "D-LAB ESTIM01"
serviceBattery: str = "955a180a-0fe2-f5aa-a094-84b8d4f3e8ad"
characteristicBattery: Union[str, BleakGATTCharacteristic] = (
"955a1500-0fe2-f5aa-a094-84b8d4f3e8ad"
)
serviceEStim: str = "955a180b-0fe2-f5aa-a094-84b8d4f3e8ad"
characteristicEStimPower: Union[str, BleakGATTCharacteristic] = (
"955a1504-0fe2-f5aa-a094-84b8d4f3e8ad"
)
characteristicEStimB: Union[str, BleakGATTCharacteristic] = (
"955a1505-0fe2-f5aa-a094-84b8d4f3e8ad"
)
characteristicEStimA: Union[str, BleakGATTCharacteristic] = (
"955a1506-0fe2-f5aa-a094-84b8d4f3e8ad"
)
class CoyoteV3(object):
name: str = "47L121000"
wirelessSensorName: str = "47L120100"
serviceWrite: str = "0000180c-0000-1000-8000-00805f9b34fb"
serviceNotify: str = "0000180c-0000-1000-8000-00805f9b34fb"
characteristicWrite: Union[str, BleakGATTCharacteristic] = (
"0000150A-0000-1000-8000-00805f9b34fb"
)
characteristicNotify: Union[str, BleakGATTCharacteristic] = (
"0000150B-0000-1000-8000-00805f9b34fb"
)
| 1,255 | Python | .py | 30 | 36.533333 | 69 | 0.738525 | shilapi/DGLAB-python-driver | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,917 | bthandler_v3.py | shilapi_DGLAB-python-driver/pydglab/bthandler_v3.py | import logging
from bleak import BleakClient, BleakScanner
from typing import Tuple, List
from bitstring import BitArray
from pydglab.model_v3 import *
from pydglab.uuid import *
logger = logging.getLogger(__name__)
async def scan():
"""
Scan for DGLAB v3.0 devices and return a list of tuples with the address and the RSSI of the devices found.
Returns:
List[Tuple[str, int]]: (address, RSSI)
"""
devices = await BleakScanner().discover(return_adv=True)
dglab_v3: List[Tuple[str, int]] = []
for i, j in devices.values():
if j.local_name == CoyoteV3.name and i.address is not None:
logger.info(f"Found DGLAB v3.0 {i.address}")
dglab_v3.append((i.address, j.rssi))
if not dglab_v3:
logger.error("No DGLAB v3.0 found")
return dglab_v3
async def scan_():
dglab_v3 = await scan()
if not dglab_v3:
raise Exception("No DGLAB v3.0 found")
if len(dglab_v3) > 1:
logger.warning("Multiple DGLAB v3.0 found, chosing the closest one")
elif len(dglab_v3) == 0:
raise Exception("No DGLAB v3.0 found")
return sorted(dglab_v3, key=lambda device: device[1])[0][0]
async def notify_(client: BleakClient, characteristics: CoyoteV3, callback: callable):
await client.start_notify(characteristics.characteristicNotify, callback)
async def write_strenth_(
client: BleakClient, value: Coyote, characteristics: CoyoteV3
):
struct = (
0xB0,
0b00010000 + 0b00001111,
value.ChannelA.strength,
value.ChannelB.strength,
value.ChannelA.wave,
value.ChannelA.waveStrenth,
value.ChannelB.wave,
value.ChannelB.waveStrenth,
)
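    # The resulting B0 frame is 20 bytes as built above: command byte, a second byte (presumably
    # sequence/flag bits), channel A and B strengths, then 4 frequency + 4 strength bytes per channel.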
bytes_ = bytes(
tuple(
item if isinstance(item, int) else subitem
for item in struct
for subitem in (tuple(item) if isinstance(item, list) else (item,))
)
)
logger.debug(f"Sending bytes: {bytes_.hex()} , which is {bytes_}")
await client.write_gatt_char(characteristics.characteristicWrite, bytes_)
async def write_coefficient_(
client: BleakClient, value: Coyote, characteristics: CoyoteV3
):
struct = (
0xBF,
value.ChannelA.limit,
value.ChannelB.limit,
value.ChannelA.coefficientFrequency,
value.ChannelB.coefficientFrequency,
value.ChannelA.coefficientStrenth,
value.ChannelB.coefficientStrenth,
)
bytes_ = bytes(
tuple(
item if isinstance(item, int) else subitem
for item in struct
for subitem in (item if isinstance(item, tuple) else (item,))
)
)
logger.debug(f"Sending bytes: {bytes_.hex()} , which is {bytes_}")
await client.write_gatt_char(characteristics.characteristicWrite, bytes_)
| 2,808 | Python | .py | 76 | 30.355263 | 111 | 0.666421 | shilapi/DGLAB-python-driver | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,918 | model_v3.py | shilapi_DGLAB-python-driver/pydglab/model_v3.py | from typing import Optional
class ChannelA(object):
def __init__(self):
self.strength: Optional[int] = None
self.wave: Optional[list[int]] = [0, 0, 0, 0]
self.waveStrenth: Optional[list[int]] = [0, 0, 0, 0]
self.coefficientStrenth: Optional[int] = None
self.coefficientFrequency: Optional[int] = None
self.limit: Optional[int] = None
class ChannelB(object):
def __init__(self):
self.strength: Optional[int] = None
self.wave: Optional[list[int]] = [0, 0, 0, 0]
self.waveStrenth: Optional[list[int]] = [0, 0, 0, 0]
self.coefficientStrenth: Optional[int] = None
self.coefficientFrequency: Optional[int] = None
self.limit: Optional[int] = None
class Coyote(object):
def __init__(self):
self.ChannelA: Optional[ChannelA] = ChannelA()
self.ChannelB: Optional[ChannelB] = ChannelB()
Wave_set = {
"Going_Faster": [
(5, 135, 20),
(5, 125, 20),
(5, 115, 20),
(5, 105, 20),
(5, 95, 20),
(4, 86, 20),
(4, 76, 20),
(4, 66, 20),
(3, 57, 20),
(3, 47, 20),
(3, 37, 20),
(2, 28, 20),
(2, 18, 20),
(1, 14, 20),
(1, 9, 20),
],
}
| 1,270 | Python | .py | 40 | 24.25 | 60 | 0.536825 | shilapi/DGLAB-python-driver | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,919 | __init__.py | shilapi_DGLAB-python-driver/pydglab/__init__.py | import logging, os
LOGFORMAT = "%(module)s [%(levelname)s]: %(message)s"
_logger = logging.getLogger(__name__)
if bool(os.environ.get("BLEAK_LOGGING", False)):
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
handler.setFormatter(logging.Formatter(fmt=LOGFORMAT))
_logger.addHandler(handler)
from .service import dglab, dglab_v3
from .bthandler_v2 import scan
from .bthandler_v3 import scan
| 428 | Python | .py | 11 | 36.090909 | 58 | 0.757869 | shilapi/DGLAB-python-driver | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,920 | model_v2.py | shilapi_DGLAB-python-driver/pydglab/model_v2.py | from typing import Optional
class ChannelA(object):
def __init__(self):
self.strength: Optional[int] = None
self.wave: Optional[bytearray[int]] = bytearray((0, 0, 0))
self.waveX: Optional[int] = self.wave[0]
self.waveY: Optional[int] = self.wave[1]
self.waveZ: Optional[int] = self.wave[2]
class ChannelB(object):
def __init__(self):
self.strength: Optional[int] = None
self.wave: Optional[bytearray[int]] = bytearray((0, 0, 0))
self.waveX: Optional[int] = self.wave[0]
self.waveY: Optional[int] = self.wave[1]
self.waveZ: Optional[int] = self.wave[2]
class Coyote(object):
def __init__(self):
self.ChannelA: ChannelA = ChannelA()
self.ChannelB: ChannelB = ChannelB()
self.Battery: Optional[int] = None
Wave_set = {
"Going_Faster": [
(5, 135, 20),
(5, 125, 20),
(5, 115, 20),
(5, 105, 20),
(5, 95, 20),
(4, 86, 20),
(4, 76, 20),
(4, 66, 20),
(3, 57, 20),
(3, 47, 20),
(3, 37, 20),
(2, 28, 20),
(2, 18, 20),
(1, 14, 20),
(1, 9, 20),
],
}
| 1,189 | Python | .py | 39 | 23.025641 | 66 | 0.523643 | shilapi/DGLAB-python-driver | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,921 | service.py | shilapi_DGLAB-python-driver/pydglab/service.py | import logging, asyncio, time
from bleak import BleakClient
from typing import Tuple
import pydglab.model_v2 as model_v2
import pydglab.model_v3 as model_v3
from pydglab.uuid import *
import pydglab.bthandler_v2 as v2
import pydglab.bthandler_v3 as v3
logger = logging.getLogger(__name__)
class dglab(object):
coyote = model_v2.Coyote()
def __init__(self, address: str = None) -> None:
self.address = address
return None
async def create(self) -> "dglab":
"""
        Create a connection to the DGLAB device and initialize it.
Returns:
dglab: The initialized DGLAB object.
Raises:
Exception: If the device is not supported or if an unknown device is connected.
"""
if self.address is None:
# If address is not provided, scan for it.
self.address = await v2.scan_()
# Connect to the device.
logger.debug(f"Connecting to {self.address}")
self.client = BleakClient(self.address, timeout=20.0)
await self.client.connect()
# Wait for a second to allow service discovery to complete
await asyncio.sleep(1)
# Check if the device is valid.
services = self.client.services
service = [service.uuid for service in services]
logger.debug(f"Got services: {str(service)}")
if CoyoteV2.serviceBattery in service and CoyoteV2.serviceEStim in service:
logger.info("Connected to DGLAB v2.0")
            # Store the resolved BleakGATTCharacteristic objects in the characteristics list to optimize performance.
self.characteristics = CoyoteV2
logger.debug(f"Got characteristics: {str(self.characteristics)}")
for i in self.client.services.characteristics.values():
if i.uuid == self.characteristics.characteristicBattery:
self.characteristics.characteristicBattery = i
elif i.uuid == self.characteristics.characteristicEStimPower:
self.characteristics.characteristicEStimPower = i
elif i.uuid == self.characteristics.characteristicEStimA:
self.characteristics.characteristicEStimA = i
elif i.uuid == self.characteristics.characteristicEStimB:
self.characteristics.characteristicEStimB = i
elif CoyoteV3.serviceWrite in service and CoyoteV3.serviceNotify in service:
raise Exception("DGLAB v3.0 found, please use dglab_v3 instead")
else:
raise Exception(
"Unknown device (你自己看看你连的是什么jb设备)"
) # Sorry for my language.
self.channelA_wave_set: list[tuple[int, int, int]] = []
self.channelB_wave_set: list[tuple[int, int, int]] = []
# Initialize self.coyote
await self.get_batterylevel()
await self.get_strength()
await self.set_wave_sync(0, 0, 0, 0, 0, 0)
await self.set_strength(0, 0)
# Start the wave tasks, to keep the device functioning.
self.wave_tasks = asyncio.gather(
self._keep_wave(),
)
return self
@classmethod
async def from_address(cls, address: str) -> "dglab":
"""
        Creates a new 'dglab' instance from the specified address; especially handy when connecting to several devices at once.
Args:
address (str): The address to connect to.
Returns:
dglab: An instance of the 'dglab' class.
"""
return cls(address)
async def get_batterylevel(self) -> int:
"""
        Retrieves the remaining battery level from the device (careful, a dead battery cuts the fun short :)).
Returns:
int: The battery level as an integer value.
"""
value = await v2.get_batterylevel_(self.client, self.characteristics)
value = value[0]
logger.debug(f"Received battery level: {value}")
self.coyote.Battery = int(value)
return self.coyote.Battery
async def get_strength(self) -> Tuple[int, int]:
"""
        Retrieves the current strength of the device.
        Returns:
            Tuple[int, int]: channel A strength, channel B strength
"""
value = await v2.get_strength_(self.client, self.characteristics)
logger.debug(f"Received strength: A: {value[0]}, B: {value[1]}")
self.coyote.ChannelA.strength = int(value[0])
self.coyote.ChannelB.strength = int(value[1])
return self.coyote.ChannelA.strength, self.coyote.ChannelB.strength
async def set_strength(self, strength: int, channel: model_v2.ChannelA | model_v2.ChannelB) -> None:
"""
        Set the strength of the device.
        This per-channel setter only exists for consistency with the wave-setting functions.
        Args:
            strength (int): strength value
            channel (ChannelA | ChannelB): target channel
        Returns:
            int: strength value
"""
if channel is model_v2.ChannelA:
self.coyote.ChannelA.strength = strength
elif channel is model_v2.ChannelB:
self.coyote.ChannelB.strength = strength
r = await v2.set_strength_(self.client, self.coyote, self.characteristics)
logger.debug(f"Set strength response: {r}")
return (
self.coyote.ChannelA.strength
if channel is model_v2.ChannelA
else self.coyote.ChannelB.strength
)
async def set_strength_sync(self, strengthA: int, strengthB: int) -> None:
"""
        Set the strength of both channels synchronously; this is the recommended way.
        Args:
            strengthA (int): channel A strength
            strengthB (int): channel B strength
        Returns:
            (int, int): channel A strength, channel B strength
"""
self.coyote.ChannelA.strength = strengthA
self.coyote.ChannelB.strength = strengthB
r = await v2.set_strength_(self.client, self.coyote, self.characteristics)
logger.debug(f"Set strength response: {r}")
return self.coyote.ChannelA.strength, self.coyote.ChannelB.strength
"""
How wave set works:
1. Set the wave set for channel A and channel B.
2. The wave set will be looped indefinitely by
wave_set_handler, and change the value in
self.coyote.ChannelN.waveN.
"""
async def set_wave_set(
self, wave_set: list[tuple[int, int, int]], channel: model_v2.ChannelA | model_v2.ChannelB
) -> None:
"""
        Set the wave set (the so-called "continuously changing wave") for the device.
        Args:
            wave_set (list[tuple[int, int, int]]): wave set
            channel (ChannelA | ChannelB): target channel
Returns:
None: None
"""
if channel is model_v2.ChannelA:
self.channelA_wave_set = wave_set
elif channel is model_v2.ChannelB:
self.channelB_wave_set = wave_set
return None
async def set_wave_set_sync(
self,
wave_setA: list[tuple[int, int, int]],
wave_setB: list[tuple[int, int, int]],
) -> None:
"""
        Set the wave sets for both channels synchronously.
        Args:
            wave_setA (list[tuple[int, int, int]]): channel A wave set
            wave_setB (list[tuple[int, int, int]]): channel B wave set
Returns:
None: None
"""
self.channelA_wave_set = wave_setA
self.channelB_wave_set = wave_setB
return None
"""
How set_wave works:
    Basically, it generates a wave set containing a single wave
    and stores it in self.channelN_wave_set.
All the wave changes will be applied to the device by wave_set.
"""
async def set_wave(
self, waveX: int, waveY: int, waveZ: int, channel: model_v2.ChannelA | model_v2.ChannelB
) -> Tuple[int, int, int]:
"""
        Set the wave for the device.
        (Dull and tedious... feels not as good as...)
        Args:
            waveX (int): emit X consecutive pulses, each pulse lasting 1 ms
            waveY (int): then pause for Y cycles, each cycle lasting 1 ms
            waveZ (int): each pulse is Z*5 us wide
            channel (ChannelA | ChannelB): target channel
        Returns:
            Tuple[int, int, int]: the wave
"""
if channel is model_v2.ChannelA:
self.channelA_wave_set = [(waveX, waveY, waveZ)]
elif channel is model_v2.ChannelB:
self.channelB_wave_set = [(waveX, waveY, waveZ)]
return waveX, waveY, waveZ
async def set_wave_sync(
self,
waveX_A: int,
waveY_A: int,
waveZ_A: int,
waveX_B: int,
waveY_B: int,
waveZ_B: int,
) -> Tuple[int, int, int, int, int, int]:
"""
        Set the waves for both channels synchronously.
        Args:
            waveX_A (int): channel A, emit X consecutive pulses, each pulse lasting 1 ms
            waveY_A (int): channel A, then pause for Y cycles, each cycle lasting 1 ms
            waveZ_A (int): channel A, width of each pulse
            waveX_B (int): channel B, emit X consecutive pulses, each pulse lasting 1 ms
            waveY_B (int): channel B, then pause for Y cycles, each cycle lasting 1 ms
            waveZ_B (int): channel B, width of each pulse
        Returns:
            Tuple[Tuple[int, int, int], Tuple[int, int, int]]: channel A wave, channel B wave
"""
self.channelA_wave_set = [(waveX_A, waveY_A, waveZ_A)]
self.channelB_wave_set = [(waveX_B, waveY_B, waveZ_B)]
r = await v2.set_wave_(
self.client, self.coyote.ChannelA, self.characteristics
), await v2.set_wave_(self.client, self.coyote.ChannelB, self.characteristics)
return (waveX_A, waveY_A, waveZ_A), (waveX_B, waveY_B, waveZ_B)
def _channelA_wave_set_handler(self):
"""
Do not use this function directly.
Yep this is how wave set works :)
PR if you have a better solution.
"""
while True:
for wave in self.channelA_wave_set:
self.coyote.ChannelA.waveX = wave[0]
self.coyote.ChannelA.waveY = wave[1]
self.coyote.ChannelA.waveZ = wave[2]
yield (None)
def _channelB_wave_set_handler(self):
"""
Do not use this function directly.
Yep this is how wave set works :)
PR if you have a better solution.
"""
while True:
for wave in self.channelB_wave_set:
self.coyote.ChannelB.waveX = wave[0]
self.coyote.ChannelB.waveY = wave[1]
self.coyote.ChannelB.waveZ = wave[2]
yield (None)
async def _keep_wave(self) -> None:
"""
Don't use this function directly.
"""
last_time = time.time()
ChannelA_keeping = self._channelA_wave_set_handler()
ChannelB_keeping = self._channelB_wave_set_handler()
while True:
try:
# logger.debug(f"Time elapsed: {time.time() - last_time}")
if time.time() - last_time >= 0.1:
# Record time for loop
last_time = time.time()
r = await v2.set_wave_(
self.client, self.coyote.ChannelA, self.characteristics
), await v2.set_wave_(
self.client, self.coyote.ChannelB, self.characteristics
)
logger.debug(f"Set wave response: {r}")
next(ChannelA_keeping)
next(ChannelB_keeping)
except asyncio.exceptions.CancelledError:
logger.error("Cancelled error")
break
return None
async def close(self):
"""
        The Coyote is great, but don't overdo it.
Close the connection to the device.
Returns:
None: None
"""
try:
self.wave_tasks.cancel()
await self.wave_tasks
        except (asyncio.CancelledError, asyncio.exceptions.InvalidStateError):
pass
await self.client.disconnect()
return None
class dglab_v3(object):
coyote = model_v3.Coyote()
def __init__(self, address: str = None) -> None:
self.address = address
return None
async def create(self) -> "dglab_v3":
"""
        Create a connection to the DGLAB device and initialize it.
Returns:
dglab: The initialized DGLAB object.
Raises:
Exception: If the device is not supported or if an unknown device is connected.
"""
if self.address is None:
# If address is not provided, scan for it.
self.address = await v3.scan_()
# Connect to the device.
logger.debug(f"Connecting to {self.address}")
self.client = BleakClient(self.address, timeout=20.0)
await self.client.connect()
# Wait for a second to allow service discovery to complete
await asyncio.sleep(1)
# Check if the device is valid.
services = self.client.services
service = [service.uuid for service in services]
logger.debug(f"Got services: {str(service)}")
if CoyoteV2.serviceBattery in service and CoyoteV2.serviceEStim in service:
raise Exception("DGLAB v2.0 found, please use dglab instead")
elif CoyoteV3.serviceWrite in service and CoyoteV3.serviceNotify in service:
logger.info("Connected to DGLAB v3.0")
            # Store the resolved BleakGATTCharacteristic objects in the characteristics list to optimize performance.
self.characteristics = CoyoteV3
logger.debug(f"Got characteristics: {str(self.characteristics)}")
for i in self.client.services.characteristics.values():
if i.uuid == self.characteristics.characteristicWrite:
self.characteristics.characteristicWrite = i
elif i.uuid == self.characteristics.characteristicNotify:
self.characteristics.characteristicNotify = i
else:
raise Exception(
"Unknown device (你自己看看你连的是什么jb设备)"
) # Sorry for my language.
self.channelA_wave_set: list[tuple[int, int, int]] = []
self.channelB_wave_set: list[tuple[int, int, int]] = []
# Initialize notify
await v3.notify_(self.client, self.characteristics, self.notify_callback)
# Initialize self.coyote
self.coyote.ChannelA.limit = 200
self.coyote.ChannelB.limit = 200
self.coyote.ChannelA.coefficientStrenth = 100
self.coyote.ChannelB.coefficientStrenth = 100
self.coyote.ChannelA.coefficientFrequency = 100
self.coyote.ChannelB.coefficientFrequency = 100
await self.set_coefficient(200, 100, 100, model_v3.ChannelA)
await self.set_coefficient(200, 100, 100, model_v3.ChannelB)
await self.set_wave_sync(0, 0, 0, 0, 0, 0)
await self.set_strength_sync(0, 0)
# Start the wave tasks, to keep the device functioning.
self.wave_tasks = asyncio.gather(
self._retainer(),
)
return self
@classmethod
async def from_address(cls, address: str) -> "dglab_v3":
"""
        Creates a new 'dglab_v3' instance from the specified address; especially handy when connecting to several devices at once.
Args:
address (str): The address to connect to.
Returns:
dglab: An instance of the 'dglab' class.
"""
return cls(address)
async def notify_callback(self, sender: BleakGATTCharacteristic, data: bytearray):
logger.debug(f"{sender}: {data}")
if data[0] == 0xB1:
# self.coyote.ChannelA.strength = int(data[2])
# self.coyote.ChannelB.strength = int(data[3])
logger.debug(f"Getting bytes(0xB1): {data.hex()} , which is {data}")
if data[0] == 0xBE:
# self.coyote.ChannelA.limit = int(data[1])
# self.coyote.ChannelB.limit = int(data[2])
# self.coyote.ChannelA.coefficientFrequency = int(data[3])
# self.coyote.ChannelB.coefficientFrequency = int(data[4])
# self.coyote.ChannelA.coefficientStrenth = int(data[5])
# self.coyote.ChannelB.coefficientStrenth = int(data[6])
logger.debug(f"Getting bytes(0xBE): {data.hex()} , which is {data}")
async def get_strength(self) -> Tuple[int, int]:
"""
        Retrieves the current strength of the device.
        Returns:
            Tuple[int, int]: channel A strength, channel B strength
"""
return self.coyote.ChannelA.strength, self.coyote.ChannelB.strength
async def set_strength(self, strength: int, channel: model_v3.ChannelA | model_v3.ChannelB) -> None:
"""
        Set the strength of the device.
        This per-channel setter only exists for consistency with the wave-setting functions.
        Args:
            strength (int): strength value
            channel (ChannelA | ChannelB): target channel
        Returns:
            int: strength value
"""
if channel is model_v3.ChannelA:
self.coyote.ChannelA.strength = strength
elif channel is model_v3.ChannelB:
self.coyote.ChannelB.strength = strength
return (
self.coyote.ChannelA.strength
if channel is model_v3.ChannelA
else self.coyote.ChannelB.strength
)
async def set_coefficient(
self,
strength_limit: int,
strength_coefficient: int,
frequency_coefficient: int,
channel: model_v3.ChannelA | model_v3.ChannelB,
) -> None:
"""
        Set the strength limit and the balance coefficients of the device.
        Args:
            strength_limit (int): strength upper limit
            strength_coefficient (int): strength balance coefficient
            frequency_coefficient (int): frequency balance coefficient
            channel (ChannelA | ChannelB): target channel
        Returns:
            Tuple[int, int, int]: strength limit, strength balance coefficient, frequency balance coefficient
"""
if channel is model_v3.ChannelA:
self.coyote.ChannelA.limit = strength_limit
self.coyote.ChannelA.coefficientStrenth = strength_coefficient
self.coyote.ChannelA.coefficientFrequency = frequency_coefficient
elif channel is model_v3.ChannelB:
self.coyote.ChannelB.limit = strength_limit
self.coyote.ChannelB.coefficientStrenth = strength_coefficient
self.coyote.ChannelB.coefficientFrequency = frequency_coefficient
await v3.write_coefficient_(self.client, self.coyote, self.characteristics)
return (
(
self.coyote.ChannelA.limit,
self.coyote.ChannelA.coefficientStrenth,
self.coyote.ChannelA.coefficientFrequency,
)
if channel is model_v3.ChannelA
else (
self.coyote.ChannelB.limit,
self.coyote.ChannelB.coefficientStrenth,
self.coyote.ChannelB.coefficientFrequency,
)
)
async def set_strength_sync(self, strengthA: int, strengthB: int) -> None:
"""
        Set the strength of both channels synchronously; this is the recommended way.
        Args:
            strengthA (int): channel A strength
            strengthB (int): channel B strength
        Returns:
            (int, int): channel A strength, channel B strength
"""
self.coyote.ChannelA.strength = strengthA
self.coyote.ChannelB.strength = strengthB
return self.coyote.ChannelA.strength, self.coyote.ChannelB.strength
"""
How wave set works:
1. Set the wave set for channel A and channel B.
2. The wave set will be looped indefinitely by
wave_set_handler, and change the value in
self.coyote.ChannelN.waveN.
"""
async def set_wave_set(
self, wave_set: list[tuple[int, int, int]], channel: model_v3.ChannelA | model_v3.ChannelB
) -> None:
"""
        Set the wave set (the so-called "continuously changing wave") for the device.
        Args:
            wave_set (list[tuple[int, int, int]]): wave set
            channel (ChannelA | ChannelB): target channel
Returns:
None: None
"""
if channel is model_v3.ChannelA:
self.channelA_wave_set = wave_set
elif channel is model_v3.ChannelB:
self.channelB_wave_set = wave_set
return None
async def set_wave_set_sync(
self,
wave_setA: list[tuple[int, int, int]],
wave_setB: list[tuple[int, int, int]],
) -> None:
"""
        Set the wave sets for both channels synchronously.
        Args:
            wave_setA (list[tuple[int, int, int]]): channel A wave set
            wave_setB (list[tuple[int, int, int]]): channel B wave set
Returns:
None: None
"""
self.channelA_wave_set = wave_setA
self.channelB_wave_set = wave_setB
return None
def waveset_converter(
self, wave_set: list[tuple[int, int, int]]
) -> tuple[int, int]:
"""
Convert the wave set to the correct format.
"""
freq = int((((wave_set[0] + wave_set[1]) - 10) / 990) * 230 + 10)
strenth = int(wave_set[2] * 5)
return freq, strenth
"""
How set_wave works:
    Basically, it generates a wave set containing a single wave
    and stores it in self.channelN_wave_set.
All the wave changes will be applied to the device by wave_set.
"""
async def set_wave(
self, waveX: int, waveY: int, waveZ: int, channel: model_v3.ChannelA | model_v3.ChannelB
) -> Tuple[int, int, int]:
"""
        Set the wave for the device.
        (Dull and tedious... feels not as good as...)
        Args:
            waveX (int): emit X consecutive pulses, each pulse lasting 1 ms
            waveY (int): then pause for Y cycles, each cycle lasting 1 ms
            waveZ (int): each pulse is Z*5 us wide
            channel (ChannelA | ChannelB): target channel
        Returns:
            Tuple[int, int, int]: the wave
"""
if channel is model_v3.ChannelA:
self.channelA_wave_set = [(waveX, waveY, waveZ)]
elif channel is model_v3.ChannelB:
self.channelB_wave_set = [(waveX, waveY, waveZ)]
return waveX, waveY, waveZ
async def set_wave_sync(
self,
waveX_A: int,
waveY_A: int,
waveZ_A: int,
waveX_B: int,
waveY_B: int,
waveZ_B: int,
) -> Tuple[int, int, int, int, int, int]:
"""
        Set the waves for both channels synchronously.
        Args:
            waveX_A (int): channel A, emit X consecutive pulses, each pulse lasting 1 ms
            waveY_A (int): channel A, then pause for Y cycles, each cycle lasting 1 ms
            waveZ_A (int): channel A, width of each pulse
            waveX_B (int): channel B, emit X consecutive pulses, each pulse lasting 1 ms
            waveY_B (int): channel B, then pause for Y cycles, each cycle lasting 1 ms
            waveZ_B (int): channel B, width of each pulse
        Returns:
            Tuple[Tuple[int, int, int], Tuple[int, int, int]]: channel A wave, channel B wave
"""
self.channelA_wave_set = [(waveX_A, waveY_A, waveZ_A)]
self.channelB_wave_set = [(waveX_B, waveY_B, waveZ_B)]
return (waveX_A, waveY_A, waveZ_A), (waveX_B, waveY_B, waveZ_B)
def _channelA_wave_set_handler(self):
"""
Do not use this function directly.
Yep this is how wave set works :)
PR if you have a better solution.
"""
try:
while True:
for wave in self.channelA_wave_set:
wave = self.waveset_converter(wave)
self.coyote.ChannelA.wave.insert(0, wave[0])
self.coyote.ChannelA.wave.pop()
self.coyote.ChannelA.waveStrenth.insert(0, wave[1])
self.coyote.ChannelA.waveStrenth.pop()
yield (None)
except asyncio.exceptions.CancelledError:
pass
def _channelB_wave_set_handler(self):
"""
Do not use this function directly.
Yep this is how wave set works :)
PR if you have a better solution.
"""
try:
while True:
for wave in self.channelB_wave_set:
wave = self.waveset_converter(wave)
self.coyote.ChannelB.wave.insert(0, wave[0])
self.coyote.ChannelB.wave.pop()
self.coyote.ChannelB.waveStrenth.insert(0, wave[1])
self.coyote.ChannelB.waveStrenth.pop()
yield (None)
except asyncio.exceptions.CancelledError:
pass
async def _retainer(self) -> None:
"""
Don't use this function directly.
"""
ChannelA_keeping = self._channelA_wave_set_handler()
ChannelB_keeping = self._channelB_wave_set_handler()
last_time = time.time()
while True:
if time.time() - last_time >= 0.1:
# Record time for loop
last_time = time.time()
logger.debug(
f"Using wave: {self.coyote.ChannelA.wave}, {self.coyote.ChannelA.waveStrenth}, {self.coyote.ChannelB.wave}, {self.coyote.ChannelB.waveStrenth}"
)
r = await v3.write_strenth_(
self.client, self.coyote, self.characteristics
)
logger.debug(f"Retainer response: {r}")
next(ChannelA_keeping)
next(ChannelB_keeping)
return None
async def close(self) -> None:
"""
        The Coyote is great, but don't overdo it.
Close the connection to the device.
Returns:
None: None
"""
try:
self.wave_tasks.cancel()
await self.wave_tasks
        except (asyncio.CancelledError, asyncio.exceptions.InvalidStateError):
pass
await self.client.disconnect()
return None
| 27,276 | Python | .py | 639 | 29.406886 | 163 | 0.590551 | shilapi/DGLAB-python-driver | 8 | 2 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,922 | input_accordion_highlight.py | w-e-w_sd-webui-input-accordion-highlight/scripts/input_accordion_highlight.py | import gradio as gr
from modules import shared, ui_components
shared.options_templates.update(
shared.options_section(
('ui', 'User interface'),
{
'sd_webui_input_accordion_activate_color_light':
shared.OptionInfo(
'#e12885',
'InputAccordion highlight - Text color - Light mode', ui_components.FormColorPicker
).needs_reload_ui(),
'sd_webui_input_accordion_activate_shadow_light':
shared.OptionInfo(
'#e12885',
'InputAccordion highlight - Border color - Light mode', ui_components.FormColorPicker
).needs_reload_ui(),
'sd_webui_input_accordion_activate_shadow_opacity_light':
shared.OptionInfo(
0.5,
'InputAccordion highlight - Border opacity - Light mode', gr.Slider, {"minimum": 0.0, "maximum": 1, "step": 0.01}
).needs_reload_ui(),
'sd_webui_input_accordion_activate_color_dark':
shared.OptionInfo(
'#86cecb',
'InputAccordion highlight - Text color - Dark mode', ui_components.FormColorPicker
).needs_reload_ui(),
'sd_webui_input_accordion_activate_shadow_dark':
shared.OptionInfo(
'#86cecb',
'InputAccordion highlight - Border color - Dark mode', ui_components.FormColorPicker
).needs_reload_ui(),
'sd_webui_input_accordion_activate_shadow_opacity_dark':
shared.OptionInfo(
0.5,
'InputAccordion highlight - Border opacity - Dark mode', gr.Slider, {"minimum": 0.0, "maximum": 1, "step": 0.01}
).needs_reload_ui(),
}
)
)
shared.gradio_theme.sd_webui_input_accordion_activate_color = shared.opts.sd_webui_input_accordion_activate_color_light
shared.gradio_theme.sd_webui_input_accordion_activate_color_dark = shared.opts.sd_webui_input_accordion_activate_color_dark
shared.gradio_theme.sd_webui_input_accordion_activate_shadow_color = shared.opts.sd_webui_input_accordion_activate_shadow_light + hex(int(shared.opts.sd_webui_input_accordion_activate_shadow_opacity_light * 255))[2:].zfill(2)
shared.gradio_theme.sd_webui_input_accordion_activate_shadow_color_dark = shared.opts.sd_webui_input_accordion_activate_shadow_dark + hex(int(shared.opts.sd_webui_input_accordion_activate_shadow_opacity_dark * 255))[2:].zfill(2)
| 2,562 | Python | .py | 43 | 46.023256 | 228 | 0.619085 | w-e-w/sd-webui-input-accordion-highlight | 8 | 0 | 0 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,923 | settings.py | The-Golden-At-SRL_CatLendar/settings.py | from enum import Enum
from cat.mad_hatter.decorators import plugin
from pydantic import BaseModel, Field, field_validator
class Languages(Enum):
English = "English"
Italian = "Italian"
class GenerateContext(Enum):
Yes = True
No = False
class MySettings(BaseModel):
language: Languages = Languages.English
generate_context: GenerateContext = GenerateContext.Yes
@plugin
def settings_model():
return MySettings | 452 | Python | .py | 15 | 26.266667 | 59 | 0.779621 | The-Golden-At-SRL/CatLendar | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,924 | CatLendar.py | The-Golden-At-SRL_CatLendar/CatLendar.py | from pydantic import BaseModel, create_model
import os
import json
from cat.experimental.form import form, CatForm, CatFormState
from cat.log import log
from .calendar import getAvailableDates, bookDate
@form
class CalendarBookingForm(CatForm):
description = "Book an appointment from those available"
jsonPath = os.path.join(
os.path.dirname(__file__), "fields.json"
)
# Import fields structure
with open(jsonPath, "r") as jsonFile:
initJson = json.load(jsonFile)
# Add booking date
initJson["bookingDate"] = "str"
# Create a dictionary of field names and types from initJson
fields_dict = {key: (value, ...) for key, value in initJson.items()}
# Dynamically create a Pydantic model
CalendarBooking = create_model('CalendarBooking', **fields_dict)
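    # For example, if fields.json contained {"name": "str", "phone": "str"} (hypothetical field names),
    # the resulting model would have string fields name, phone and bookingDate.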
log.debug(CalendarBooking.schema_json(indent=2))
model_class = CalendarBooking
start_examples = [
"I want to book an appointment",
]
stop_examples = [
"Cancel reservation"
]
# It gives some problems with the current version
ask_confirm = False
# Override because current version is experimental and provides a dev output
def message(self):
# Get settings
settings = self.cat.mad_hatter.get_plugin().load_settings()
lang = settings["language"]
if self._state == CatFormState.CLOSED:
return {
"output": f"Form {type(self).__name__} closed"
}
separator = "\n - "
missing_fields = ""
out = ""
next_request = ""
missing_fields_list = []
if self._missing_fields:
missing_fields = "\nMissing fields:"
missing_fields += separator + separator.join(self._missing_fields)
# missing_fields is a str, we want a list
for missing in self._missing_fields:
log.debug(f"MISSING: {missing}")
missing_fields_list.append(missing)
# A little bit hardcoded, but it's the fastest way
if missing_fields_list[0] == "bookingDate":
# Get available dates
availableDates = getAvailableDates()
log.debug(availableDates)
availablePrompt = f"""Your task is to propose to the user the availble dates for booking an appointment. You should ask the question in {lang}.
The available dates are the following:
```json
{json.dumps(availableDates)}
```
                Write a list to the user that summarises the available dates.
"""
availablePromptEscaped = availablePrompt.replace("{", "{{").replace("}", "}}")
response = self.cat.llm(availablePromptEscaped)
log.debug(response)
out = response
else:
# Here we want to generate a request phrase
prompt = f"""Your task is to ask the user for an information. The information that you must ask is '{missing_fields_list[0]}'. You should ask the question in {lang}."""
response = self.cat.llm(prompt)
log.debug(f"RESPONSE: {response}")
out = response
log.debug(missing_fields_list)
log.debug(f"Output: {missing_fields_list[0]}")
invalid_fields = ""
if self._errors:
invalid_fields = "\nInvalid fields:"
invalid_fields += separator + separator.join(self._errors)
if self._state == CatFormState.WAIT_CONFIRM:
# Generate confirm phrase
            prompt = f"""Your task is to ask the user to confirm the chosen date for the appointment. The user should answer with yes or no.
            The chosen date is '{self._model["bookingDate"]}'. You should ask the question in {lang}."""
out = self.cat.llm(prompt)
return {
"output": out
}
def submit(self, form_data):
# Get settings
settings = self.cat.mad_hatter.get_plugin().load_settings()
lang = settings["language"]
generate_context = settings["generate_context"]
# Separate date and hour from user result
availableDates = getAvailableDates()
        datePrompt = f"""Your task is to produce a JSON representing the information that the user has given to you.
JSON must be in this format:
```json
{{
"date": // type string or null, must contain a date in the format dd/mm/yyyy
"time" // type string or null, must contain a time in the format hh:mm
}}
```
        The date and the time chosen by the user must be in the following JSON:
```json
{json.dumps(availableDates)}
```
User said: "{form_data["bookingDate"]}"
```json
"""
response = self.cat.llm(datePrompt)
log.debug(response)
        # Strip the markdown fence markers so the response can be parsed as JSON
        # (str.replace returns a new string, so the result must be reassigned)
        response = response.replace("json", "").replace("`", "")
dateJson = json.loads(response)
log.debug(dateJson)
# Generate chat context
context = ""
if generate_context:
history = getattr(self.cat, "working_memory").history[:(-len(form_data) * 2)]
history_string = ""
for turn in history:
history_string += f"\n - {turn['who']}: {turn['message']}"
log.debug(history_string)
            contextPrompt = f"""The user has booked an appointment. Your task is to give a title to the appointment based on the chat you had with the user before the booking.
The title should be in {lang}.
The history of your chat with the user is:
\"{history_string}\""""
context = self.cat.llm(contextPrompt)
# Book it
bookDate(dateJson["date"], dateJson["time"], context, form_data)
# Generate final phrase
prompt = f"""Your task is to tell the user that his appointment has been booked. You should write the phrase in {lang}."""
out = self.cat.llm(prompt)
return {
"output": out
}
| 6,506 | Python | .py | 137 | 34.686131 | 184 | 0.600698 | The-Golden-At-SRL/CatLendar | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,925 | calendar.py | The-Golden-At-SRL_CatLendar/calendar.py | import os
import csv
from cat.log import log
csvPath = os.path.join(
os.path.dirname(__file__), "calendar.csv"
)
# Returns available dates
def getAvailableDates():
allDates = []
with open(csvPath, "r") as csvFile:
csvCalendar = csv.DictReader(csvFile)
for row in csvCalendar:
if row["booked"] == "False":
allDates.append(row)
return allDates
# Books an appointment
def bookDate(date, hour, context, fields):
toSave = []
with open(csvPath, "r") as csvFile:
csvCalendar = csv.DictReader(csvFile)
for row in csvCalendar:
if row["date"] == date and row["hour"] == hour:
for key in fields:
row[key] = fields[key]
row["context"] = context
row["booked"] = "True"
toSave.append(row)
log.debug(toSave)
# And now save it
with open(csvPath, "w") as csvFile:
log.debug(toSave)
fieldnames = ["date", "hour"]
for key in fields:
fieldnames.append(key)
fieldnames.append("context")
fieldnames.append("booked")
log.debug(fields)
log.debug(fieldnames)
writer = csv.DictWriter(csvFile, fieldnames=fieldnames)
writer.writeheader()
for row in toSave:
writer.writerow(row)
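# Hedged usage sketch (illustrative only; the "name"/"email" fields are hypothetical --
# the real field names come from the plugin's fields.json). Guarded so it never runs on import:
if __name__ == "__main__":
    available = getAvailableDates()
    if available:
        slot = available[0]
        bookDate(slot["date"], slot["hour"], "Dentist appointment",
                 {"name": "Ada", "email": "ada@example.com"})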
| 1,437 | Python | .py | 42 | 24.02381 | 63 | 0.592166 | The-Golden-At-SRL/CatLendar | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,926 | scenarios.py | enriquetomasmb_nebula/nebula/scenarios.py | import glob
import hashlib
import json
import logging
import math
import os
import shutil
import subprocess
import sys
import textwrap
import time
from datetime import datetime
import docker
from nebula.addons.blockchain.blockchain_deployer import BlockchainDeployer
from nebula.addons.topologymanager import TopologyManager
from nebula.config.config import Config
from nebula.core.utils.certificate import generate_ca_certificate, generate_certificate
from nebula.frontend.utils import Utils
# Definition of a scenario
class Scenario:
"""
A class to represent a scenario.
Attributes:
scenario_title : str
Title of the scenario.
scenario_description : str
Description of the scenario.
deployment : str
Type of deployment (e.g., 'docker', 'process').
federation : str
Type of federation.
topology : str
Network topology.
nodes : dict
Dictionary of nodes.
nodes_graph : dict
Graph representation of nodes.
n_nodes : int
Number of nodes.
matrix : list
Matrix representation of the network.
dataset : str
Dataset used in the scenario.
iid : bool
Indicator if the dataset is IID.
partition_selection : str
Method of partition selection.
partition_parameter : float
Parameter for partition selection.
model : str
Model used in the scenario.
agg_algorithm : str
Aggregation algorithm.
rounds : int
Number of rounds.
logginglevel : str
Logging level.
accelerator : str
Accelerator used.
network_subnet : str
Network subnet.
network_gateway : str
Network gateway.
epochs : int
Number of epochs.
attacks : str
Type of attacks.
poisoned_node_percent : float
Percentage of poisoned nodes.
poisoned_sample_percent : float
Percentage of poisoned samples.
poisoned_noise_percent : float
Percentage of poisoned noise.
with_reputation : bool
Indicator if reputation is used.
is_dynamic_topology : bool
Indicator if topology is dynamic.
is_dynamic_aggregation : bool
Indicator if aggregation is dynamic.
target_aggregation : str
Target aggregation method.
random_geo : bool
Indicator if random geo is used.
latitude : float
Latitude for mobility.
longitude : float
Longitude for mobility.
mobility : bool
Indicator if mobility is used.
mobility_type : str
Type of mobility.
radius_federation : float
Radius of federation.
scheme_mobility : str
Scheme of mobility.
round_frequency : int
Frequency of rounds.
mobile_participants_percent : float
Percentage of mobile participants.
additional_participants : list
List of additional participants.
schema_additional_participants : str
Schema for additional participants.
"""
def __init__(
self,
scenario_title,
scenario_description,
deployment,
federation,
topology,
nodes,
nodes_graph,
n_nodes,
matrix,
dataset,
iid,
partition_selection,
partition_parameter,
model,
agg_algorithm,
rounds,
logginglevel,
accelerator,
network_subnet,
network_gateway,
epochs,
attacks,
poisoned_node_percent,
poisoned_sample_percent,
poisoned_noise_percent,
with_reputation,
is_dynamic_topology,
is_dynamic_aggregation,
target_aggregation,
random_geo,
latitude,
longitude,
mobility,
mobility_type,
radius_federation,
scheme_mobility,
round_frequency,
mobile_participants_percent,
additional_participants,
schema_additional_participants,
):
self.scenario_title = scenario_title
self.scenario_description = scenario_description
self.deployment = deployment
self.federation = federation
self.topology = topology
self.nodes = nodes
self.nodes_graph = nodes_graph
self.n_nodes = n_nodes
self.matrix = matrix
self.dataset = dataset
self.iid = iid
self.partition_selection = partition_selection
self.partition_parameter = partition_parameter
self.model = model
self.agg_algorithm = agg_algorithm
self.rounds = rounds
self.logginglevel = logginglevel
self.accelerator = accelerator
self.network_subnet = network_subnet
self.network_gateway = network_gateway
self.epochs = epochs
self.attacks = attacks
self.poisoned_node_percent = poisoned_node_percent
self.poisoned_sample_percent = poisoned_sample_percent
self.poisoned_noise_percent = poisoned_noise_percent
self.with_reputation = with_reputation
self.is_dynamic_topology = is_dynamic_topology
self.is_dynamic_aggregation = is_dynamic_aggregation
self.target_aggregation = target_aggregation
self.random_geo = random_geo
self.latitude = latitude
self.longitude = longitude
self.mobility = mobility
self.mobility_type = mobility_type
self.radius_federation = radius_federation
self.scheme_mobility = scheme_mobility
self.round_frequency = round_frequency
self.mobile_participants_percent = mobile_participants_percent
self.additional_participants = additional_participants
self.schema_additional_participants = schema_additional_participants
def attack_node_assign(
self,
nodes,
federation,
attack,
poisoned_node_percent,
poisoned_sample_percent,
poisoned_noise_percent,
):
"""Identify which nodes will be attacked"""
import random
import math
nodes_index = []
# Get the nodes index
if federation == "DFL":
nodes_index = list(nodes.keys())
else:
for node in nodes:
if nodes[node]["role"] != "server":
nodes_index.append(node)
mal_nodes_defined = any(nodes[node]["malicious"] for node in nodes)
attacked_nodes = []
if not mal_nodes_defined:
n_nodes = len(nodes_index)
# Number of attacked nodes, round up
num_attacked = int(math.ceil(poisoned_node_percent / 100 * n_nodes))
if num_attacked > n_nodes:
num_attacked = n_nodes
# Get the index of attacked nodes
attacked_nodes = random.sample(nodes_index, num_attacked)
# Assign the role of each node
for node in nodes:
node_att = "No Attack"
malicious = False
attack_sample_percent = 0
poisoned_ratio = 0
if (str(nodes[node]['id']) in attacked_nodes) or (nodes[node]["malicious"]):
malicious = True
node_att = attack
attack_sample_percent = poisoned_sample_percent / 100
poisoned_ratio = poisoned_noise_percent / 100
nodes[node]["malicious"] = malicious
nodes[node]["attacks"] = node_att
nodes[node]["poisoned_sample_percent"] = attack_sample_percent
nodes[node]["poisoned_ratio"] = poisoned_ratio
return nodes
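    # Worked example (illustrative, not part of the original code): with 10 eligible
    # nodes and poisoned_node_percent = 25, num_attacked = ceil(0.25 * 10) = 3, so
    # three nodes are sampled at random and flagged as malicious.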
def mobility_assign(self, nodes, mobile_participants_percent):
"""Assign mobility to nodes"""
import random
# Number of mobile nodes, round down
num_mobile = math.floor(mobile_participants_percent / 100 * len(nodes))
if num_mobile > len(nodes):
num_mobile = len(nodes)
# Get the index of mobile nodes
mobile_nodes = random.sample(list(nodes.keys()), num_mobile)
# Assign the role of each node
for node in nodes:
node_mob = False
if node in mobile_nodes:
node_mob = True
nodes[node]["mobility"] = node_mob
return nodes
@classmethod
def from_dict(cls, data):
return cls(**data)
# Class to manage the current scenario
class ScenarioManagement:
def __init__(self, scenario):
# Current scenario
self.scenario = Scenario.from_dict(scenario)
# Scenario management settings
self.start_date_scenario = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
self.scenario_name = f'nebula_{self.scenario.federation}_{datetime.now().strftime("%d_%m_%Y_%H_%M_%S")}'
self.root_path = os.environ.get("NEBULA_ROOT_HOST")
self.host_platform = os.environ.get("NEBULA_HOST_PLATFORM")
self.config_dir = os.path.join(os.environ.get("NEBULA_CONFIG_DIR"), self.scenario_name)
self.log_dir = os.environ.get("NEBULA_LOGS_DIR")
self.cert_dir = os.environ.get("NEBULA_CERTS_DIR")
self.advanced_analytics = os.environ.get("NEBULA_ADVANCED_ANALYTICS", "False") == "True"
self.config = Config(entity="scenarioManagement")
# Assign the controller endpoint
if self.scenario.deployment == "docker":
self.controller = "nebula-frontend"
else:
self.controller = f"127.0.0.1:{os.environ.get('NEBULA_FRONTEND_PORT')}"
self.topologymanager = None
self.env_path = None
self.use_blockchain = self.scenario.agg_algorithm == "BlockchainReputation"
# Create Scenario management dirs
os.makedirs(self.config_dir, exist_ok=True)
os.makedirs(os.path.join(self.log_dir, self.scenario_name), exist_ok=True)
os.makedirs(self.cert_dir, exist_ok=True)
# Give permissions to the directories
os.chmod(self.config_dir, 0o777)
os.chmod(os.path.join(self.log_dir, self.scenario_name), 0o777)
os.chmod(self.cert_dir, 0o777)
# Save the scenario configuration
scenario_file = os.path.join(self.config_dir, "scenario.json")
with open(scenario_file, "w") as f:
json.dump(scenario, f, sort_keys=False, indent=2)
os.chmod(scenario_file, 0o777)
# Save management settings
settings = {
"scenario_name": self.scenario_name,
"root_path": self.root_path,
"config_dir": self.config_dir,
"log_dir": self.log_dir,
"cert_dir": self.cert_dir,
"env": None,
"use_blockchain": self.use_blockchain,
}
settings_file = os.path.join(self.config_dir, "settings.json")
with open(settings_file, "w") as f:
json.dump(settings, f, sort_keys=False, indent=2)
os.chmod(settings_file, 0o777)
self.scenario.nodes = self.scenario.attack_node_assign(
self.scenario.nodes,
self.scenario.federation,
self.scenario.attacks,
int(self.scenario.poisoned_node_percent),
int(self.scenario.poisoned_sample_percent),
int(self.scenario.poisoned_noise_percent),
)
if self.scenario.mobility:
mobile_participants_percent = int(self.scenario.mobile_participants_percent)
self.scenario.nodes = self.scenario.mobility_assign(self.scenario.nodes, mobile_participants_percent)
else:
self.scenario.nodes = self.scenario.mobility_assign(self.scenario.nodes, 0)
# Save node settings
for node in self.scenario.nodes:
node_config = self.scenario.nodes[node]
participant_file = os.path.join(self.config_dir, f'participant_{node_config["id"]}.json')
os.makedirs(os.path.dirname(participant_file), exist_ok=True)
shutil.copy(
os.path.join(os.path.dirname(__file__), "./frontend/config/participant.json.example"),
participant_file,
)
os.chmod(participant_file, 0o777)
with open(participant_file) as f:
participant_config = json.load(f)
participant_config["network_args"]["ip"] = node_config["ip"]
participant_config["network_args"]["port"] = int(node_config["port"])
participant_config["device_args"]["idx"] = node_config["id"]
participant_config["device_args"]["start"] = node_config["start"]
participant_config["device_args"]["role"] = node_config["role"]
participant_config["device_args"]["proxy"] = node_config["proxy"]
participant_config["device_args"]["malicious"] = node_config["malicious"]
participant_config["scenario_args"]["rounds"] = int(self.scenario.rounds)
participant_config["data_args"]["dataset"] = self.scenario.dataset
participant_config["data_args"]["iid"] = self.scenario.iid
participant_config["data_args"]["partition_selection"] = self.scenario.partition_selection
participant_config["data_args"]["partition_parameter"] = self.scenario.partition_parameter
participant_config["model_args"]["model"] = self.scenario.model
participant_config["training_args"]["epochs"] = int(self.scenario.epochs)
participant_config["device_args"]["accelerator"] = self.scenario.accelerator
participant_config["device_args"]["logging"] = self.scenario.logginglevel
participant_config["aggregator_args"]["algorithm"] = self.scenario.agg_algorithm
participant_config["adversarial_args"]["attacks"] = node_config["attacks"]
participant_config["adversarial_args"]["poisoned_sample_percent"] = node_config["poisoned_sample_percent"]
participant_config["adversarial_args"]["poisoned_ratio"] = node_config["poisoned_ratio"]
participant_config["defense_args"]["with_reputation"] = self.scenario.with_reputation
participant_config["defense_args"]["is_dynamic_topology"] = self.scenario.is_dynamic_topology
participant_config["defense_args"]["is_dynamic_aggregation"] = self.scenario.is_dynamic_aggregation
participant_config["defense_args"]["target_aggregation"] = self.scenario.target_aggregation
participant_config["mobility_args"]["random_geo"] = self.scenario.random_geo
participant_config["mobility_args"]["latitude"] = self.scenario.latitude
participant_config["mobility_args"]["longitude"] = self.scenario.longitude
participant_config["mobility_args"]["mobility"] = node_config["mobility"]
participant_config["mobility_args"]["mobility_type"] = self.scenario.mobility_type
participant_config["mobility_args"]["radius_federation"] = self.scenario.radius_federation
participant_config["mobility_args"]["scheme_mobility"] = self.scenario.scheme_mobility
participant_config["mobility_args"]["round_frequency"] = self.scenario.round_frequency
with open(participant_file, "w") as f:
json.dump(participant_config, f, sort_keys=False, indent=2)
@staticmethod
def stop_blockchain():
if sys.platform == "win32":
try:
                # Command adapted for PowerShell on Windows
command = "docker ps -a --filter 'label=com.docker.compose.project=blockchain' --format '{{.ID}}' | ForEach-Object { docker rm --force --volumes $_ } | Out-Null"
os.system(f'powershell.exe -Command "{command}"')
except Exception as e:
logging.error("Error while killing docker containers: {}".format(e))
else:
try:
process = subprocess.Popen("docker ps -a --filter 'label=com.docker.compose.project=blockchain' --format '{{.ID}}' | xargs -n 1 docker rm --force --volumes >/dev/null 2>&1", shell=True)
process.wait()
except subprocess.CalledProcessError:
logging.error("Docker Compose failed to stop blockchain or blockchain already exited.")
@staticmethod
def stop_participants():
        # When stopping the nodes, remove the current_scenario_commands file; its deletion triggers the watchdog handler, which stops the node processes via their stored PIDs
try:
nebula_config_dir = os.environ.get("NEBULA_CONFIG_DIR")
if not nebula_config_dir:
current_dir = os.path.dirname(__file__)
nebula_base_dir = os.path.abspath(os.path.join(current_dir, ".."))
nebula_config_dir = os.path.join(nebula_base_dir, "app", "config")
logging.info(f"NEBULA_CONFIG_DIR not found. Using default path: {nebula_config_dir}")
if os.environ.get("NEBULA_HOST_PLATFORM") == "windows":
scenario_commands_file = os.path.join(nebula_config_dir, "current_scenario_commands.ps1")
else:
scenario_commands_file = os.path.join(nebula_config_dir, "current_scenario_commands.sh")
if os.path.exists(scenario_commands_file):
os.remove(scenario_commands_file)
except Exception as e:
logging.error(f"Error while removing current_scenario_commands.sh file: {e}")
if sys.platform == "win32":
try:
# kill all the docker containers which contain the word "nebula-core"
commands = [
"""docker kill $(docker ps -q --filter ancestor=nebula-core) | Out-Null""",
"""docker rm $(docker ps -a -q --filter ancestor=nebula-core) | Out-Null""",
"""docker network rm $(docker network ls | Where-Object { ($_ -split '\s+')[1] -like 'nebula-net-scenario' } | ForEach-Object { ($_ -split '\s+')[0] }) | Out-Null""",
]
for command in commands:
time.sleep(1)
exit_code = os.system(f'powershell.exe -Command "{command}"')
# logging.info(f"Windows Command '{command}' executed with exit code: {exit_code}")
except Exception as e:
raise Exception("Error while killing docker containers: {}".format(e))
else:
try:
commands = [
"""docker kill $(docker ps -q --filter ancestor=nebula-core) > /dev/null 2>&1""",
"""docker rm $(docker ps -a -q --filter ancestor=nebula-core) > /dev/null 2>&1""",
"""docker network rm $(docker network ls | grep nebula-net-scenario | awk '{print $1}') > /dev/null 2>&1""",
]
for command in commands:
time.sleep(1)
exit_code = os.system(command)
# logging.info(f"Linux Command '{command}' executed with exit code: {exit_code}")
except Exception as e:
raise Exception("Error while killing docker containers: {}".format(e))
@staticmethod
def stop_nodes():
logging.info("Closing NEBULA nodes... Please wait")
ScenarioManagement.stop_participants()
ScenarioManagement.stop_blockchain()
def load_configurations_and_start_nodes(self, additional_participants=None, schema_additional_participants=None):
logging.info("Generating the scenario {} at {}".format(self.scenario_name, self.start_date_scenario))
# Generate CA certificate
generate_ca_certificate(dir_path=self.cert_dir)
# Get participants configurations
participant_files = glob.glob("{}/participant_*.json".format(self.config_dir))
participant_files.sort()
if len(participant_files) == 0:
raise ValueError("No participant files found in config folder")
self.config.set_participants_config(participant_files)
self.n_nodes = len(participant_files)
logging.info("Number of nodes: {}".format(self.n_nodes))
self.topologymanager = self.create_topology(matrix=self.scenario.matrix) if self.scenario.matrix else self.create_topology()
# Update participants configuration
is_start_node = False
config_participants = []
for i in range(self.n_nodes):
with open(f"{self.config_dir}/participant_" + str(i) + ".json") as f:
participant_config = json.load(f)
participant_config["scenario_args"]["federation"] = self.scenario.federation
participant_config["scenario_args"]["n_nodes"] = self.n_nodes
participant_config["network_args"]["neighbors"] = self.topologymanager.get_neighbors_string(i)
participant_config["scenario_args"]["name"] = self.scenario_name
participant_config["scenario_args"]["start_time"] = self.start_date_scenario
participant_config["device_args"]["idx"] = i
participant_config["device_args"]["uid"] = hashlib.sha1((str(participant_config["network_args"]["ip"]) + str(participant_config["network_args"]["port"]) + str(self.scenario_name)).encode()).hexdigest()
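            # Illustrative example (hypothetical values): sha1("192.168.50.2" + "45000" + scenario_name).hexdigest() yields the 40-character hex uid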
if participant_config["mobility_args"]["random_geo"]:
(
participant_config["mobility_args"]["latitude"],
participant_config["mobility_args"]["longitude"],
) = TopologyManager.get_coordinates(random_geo=True)
# If not, use the given coordinates in the frontend
participant_config["tracking_args"]["local_tracking"] = "advanced" if self.advanced_analytics else "basic"
participant_config["tracking_args"]["log_dir"] = self.log_dir
participant_config["tracking_args"]["config_dir"] = self.config_dir
# Generate node certificate
keyfile_path, certificate_path = generate_certificate(dir_path=self.cert_dir, node_id=f"participant_{i}", ip=participant_config["network_args"]["ip"])
participant_config["security_args"]["certfile"] = certificate_path
participant_config["security_args"]["keyfile"] = keyfile_path
if participant_config["device_args"]["start"]:
if not is_start_node:
is_start_node = True
else:
raise ValueError("Only one node can be start node")
with open(f"{self.config_dir}/participant_" + str(i) + ".json", "w") as f:
json.dump(participant_config, f, sort_keys=False, indent=2)
config_participants.append((participant_config["network_args"]["ip"], participant_config["network_args"]["port"], participant_config["device_args"]["role"]))
if not is_start_node:
raise ValueError("No start node found")
self.config.set_participants_config(participant_files)
# Add role to the topology (visualization purposes)
self.topologymanager.update_nodes(config_participants)
self.topologymanager.draw_graph(path=f"{self.log_dir}/{self.scenario_name}/topology.png", plot=False)
# Include additional participants (if any) as copies of the last participant
additional_participants_files = []
if additional_participants:
last_participant_file = participant_files[-1]
last_participant_index = len(participant_files)
for i, additional_participant in enumerate(additional_participants):
additional_participant_file = f"{self.config_dir}/participant_{last_participant_index + i}.json"
shutil.copy(last_participant_file, additional_participant_file)
with open(additional_participant_file) as f:
participant_config = json.load(f)
participant_config["scenario_args"]["n_nodes"] = self.n_nodes + i + 1
participant_config["device_args"]["idx"] = last_participant_index + i
participant_config["network_args"]["neighbors"] = ""
participant_config["network_args"]["ip"] = participant_config["network_args"]["ip"].rsplit(".", 1)[0] + "." + str(int(participant_config["network_args"]["ip"].rsplit(".", 1)[1]) + 1)
participant_config["device_args"]["uid"] = hashlib.sha1((str(participant_config["network_args"]["ip"]) + str(participant_config["network_args"]["port"]) + str(self.scenario_name)).encode()).hexdigest()
participant_config["mobility_args"]["additional_node"]["status"] = True
participant_config["mobility_args"]["additional_node"]["round_start"] = additional_participant["round"]
with open(additional_participant_file, "w") as f:
json.dump(participant_config, f, sort_keys=False, indent=2)
additional_participants_files.append(additional_participant_file)
if additional_participants_files:
self.config.add_participants_config(additional_participants_files)
if self.scenario.deployment in ["docker", "process"]:
if self.use_blockchain:
self.start_blockchain()
if self.scenario.deployment == "docker":
self.start_nodes_docker()
else:
self.start_nodes_process()
else:
logging.info(f"Virtualization mode is disabled for scenario '{self.scenario_name}' with {self.n_nodes} nodes. Waiting for nodes to start manually...")
def create_topology(self, matrix=None):
import numpy as np
if matrix is not None:
if self.n_nodes > 2:
topologymanager = TopologyManager(
topology=np.array(matrix),
scenario_name=self.scenario_name,
n_nodes=self.n_nodes,
b_symmetric=True,
undirected_neighbor_num=self.n_nodes - 1,
)
else:
topologymanager = TopologyManager(
topology=np.array(matrix),
scenario_name=self.scenario_name,
n_nodes=self.n_nodes,
b_symmetric=True,
undirected_neighbor_num=2,
)
elif self.scenario.topology == "fully":
# Create a fully connected network
topologymanager = TopologyManager(
scenario_name=self.scenario_name,
n_nodes=self.n_nodes,
b_symmetric=True,
undirected_neighbor_num=self.n_nodes - 1,
)
topologymanager.generate_topology()
elif self.scenario.topology == "ring":
# Create a partially connected network (ring-structured network)
topologymanager = TopologyManager(scenario_name=self.scenario_name, n_nodes=self.n_nodes, b_symmetric=True)
topologymanager.generate_ring_topology(increase_convergence=True)
elif self.scenario.topology == "random":
# Create network topology using topology manager (random)
topologymanager = TopologyManager(
scenario_name=self.scenario_name,
n_nodes=self.n_nodes,
b_symmetric=True,
undirected_neighbor_num=3,
)
topologymanager.generate_topology()
elif self.scenario.topology == "star" and self.scenario.federation == "CFL":
# Create a centralized network
topologymanager = TopologyManager(scenario_name=self.scenario_name, n_nodes=self.n_nodes, b_symmetric=True)
topologymanager.generate_server_topology()
else:
raise ValueError("Unknown topology type: {}".format(self.scenario.topology))
# Assign nodes to topology
nodes_ip_port = []
self.config.participants.sort(key=lambda x: x["device_args"]["idx"])
for i, node in enumerate(self.config.participants):
nodes_ip_port.append(
(
node["network_args"]["ip"],
node["network_args"]["port"],
"undefined",
)
)
topologymanager.add_nodes(nodes_ip_port)
return topologymanager
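    # Illustrative note (added): a custom `matrix` is presumably an adjacency matrix,
    # e.g. [[0, 1, 1], [1, 0, 1], [1, 1, 0]] for a fully connected 3-node topology.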
def start_blockchain(self):
BlockchainDeployer(config_dir=f"{self.config_dir}/blockchain", input_dir="/nebula/nebula/addons/blockchain")
try:
logging.info("Blockchain is being deployed")
subprocess.check_call(["docker", "compose", "-f", f"{self.config_dir}/blockchain/blockchain-docker-compose.yml", "up", "--remove-orphans", "--force-recreate", "-d", "--build"])
except subprocess.CalledProcessError as e:
logging.error("Docker Compose failed to start Blockchain, please check if Docker Compose is installed (https://docs.docker.com/compose/install/) and Docker Engine is running.")
raise e
def start_nodes_docker(self):
import subprocess
try:
# First, get the list of IDs of exited containers
result_ps = subprocess.run("docker ps -aq -f status=exited --filter 'name=nebula'", shell=True, check=True, capture_output=True, text=True)
# Get the container IDs
container_ids = result_ps.stdout.strip()
if container_ids:
# Run the command to remove the containers
result_rm = subprocess.run(f"docker rm $(docker ps -aq -f status=exited --filter 'name=nebula')", shell=True, check=True, capture_output=True, text=True)
print(f"Dangling containers removed successfully: {result_rm.stdout.strip()}.")
else:
print("No dangling containers to remove.")
except subprocess.CalledProcessError as e:
print(f"Error removing stopped containers: {e}")
print(f"Error output: {e.stderr}")
except Exception as e:
print(f"Unexpected error: {e}")
logging.info("Starting nodes using Docker Compose...")
logging.info("env path: {}".format(self.env_path))
docker_compose_template = textwrap.dedent(
"""
services:
{}
"""
)
participant_template = textwrap.dedent(
"""
participant{}:
image: nebula-core
restart: no
volumes:
- {}:/nebula
- /var/run/docker.sock:/var/run/docker.sock
extra_hosts:
- "host.docker.internal:host-gateway"
ipc: host
privileged: true
command:
- /bin/bash
- -c
- |
{} && ifconfig && echo '{} host.docker.internal' >> /etc/hosts && python3.11 /nebula/nebula/node.py {}
networks:
nebula-net-scenario:
ipv4_address: {}
nebula-net-base:
{}
"""
)
participant_template = textwrap.indent(participant_template, " " * 4)
participant_gpu_template = textwrap.dedent(
"""
participant{}:
image: nebula-core
environment:
- NVIDIA_DISABLE_REQUIRE=true
restart: no
volumes:
- {}:/nebula
- /var/run/docker.sock:/var/run/docker.sock
extra_hosts:
- "host.docker.internal:host-gateway"
ipc: host
privileged: true
command:
- /bin/bash
- -c
- |
{} && ifconfig && echo '{} host.docker.internal' >> /etc/hosts && python3.11 /nebula/nebula/node.py {}
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: all
capabilities: [gpu]
networks:
nebula-net-scenario:
ipv4_address: {}
nebula-net-base:
{}
"""
)
participant_gpu_template = textwrap.indent(participant_gpu_template, " " * 4)
network_template = textwrap.dedent(
"""
networks:
nebula-net-scenario:
name: nebula-net-scenario
driver: bridge
ipam:
config:
- subnet: {}
gateway: {}
nebula-net-base:
name: nebula-net-base
external: true
{}
{}
{}
"""
)
# Generate the Docker Compose file dynamically
services = ""
self.config.participants.sort(key=lambda x: x["device_args"]["idx"])
for node in self.config.participants:
idx = node["device_args"]["idx"]
path = f"/nebula/app/config/{self.scenario_name}/participant_{idx}.json"
logging.info("Starting node {} with configuration {}".format(idx, path))
logging.info("Node {} is listening on ip {}".format(idx, node["network_args"]["ip"]))
# Add one service for each participant
if node["device_args"]["accelerator"] == "gpu":
logging.info("Node {} is using GPU".format(idx))
services += participant_gpu_template.format(
idx,
self.root_path,
"sleep 10" if node["device_args"]["start"] else "sleep 0",
self.scenario.network_gateway,
path,
node["network_args"]["ip"],
"proxy:" if self.scenario.deployment and self.use_blockchain else "",
)
else:
logging.info("Node {} is using CPU".format(idx))
services += participant_template.format(
idx,
self.root_path,
"sleep 10" if node["device_args"]["start"] else "sleep 0",
self.scenario.network_gateway,
path,
node["network_args"]["ip"],
"proxy:" if self.scenario.deployment and self.use_blockchain else "",
)
docker_compose_file = docker_compose_template.format(services)
docker_compose_file += network_template.format(
self.scenario.network_subnet, self.scenario.network_gateway, "proxy:" if self.scenario.deployment and self.use_blockchain else "", "name: chainnet" if self.scenario.deployment and self.use_blockchain else "", "external: true" if self.scenario.deployment and self.use_blockchain else ""
)
# Write the Docker Compose file in config directory
with open(f"{self.config_dir}/docker-compose.yml", "w") as f:
f.write(docker_compose_file)
# Include additional config to the participants
for idx, node in enumerate(self.config.participants):
node["tracking_args"]["log_dir"] = "/nebula/app/logs"
node["tracking_args"]["config_dir"] = f"/nebula/app/config/{self.scenario_name}"
node["scenario_args"]["controller"] = self.controller
node["security_args"]["certfile"] = f"/nebula/app/certs/participant_{node['device_args']['idx']}_cert.pem"
node["security_args"]["keyfile"] = f"/nebula/app/certs/participant_{node['device_args']['idx']}_key.pem"
node["security_args"]["cafile"] = f"/nebula/app/certs/ca_cert.pem"
# Write the config file in config directory
with open(f"{self.config_dir}/participant_{node['device_args']['idx']}.json", "w") as f:
json.dump(node, f, indent=4)
# Start the Docker Compose file, catch error if any
try:
subprocess.check_call(
[
"docker",
"compose",
"-f",
f"{self.config_dir}/docker-compose.yml",
"up",
"--build",
"-d",
]
)
except subprocess.CalledProcessError as e:
raise Exception("Docker Compose failed to start, please check if Docker Compose is installed (https://docs.docker.com/compose/install/) and Docker Engine is running.")
container_ids = None
logging.info("Waiting for nodes to start...")
# Loop until all containers are running (equivalent to the number of participants)
while container_ids is None or len(container_ids) != len(self.config.participants):
time.sleep(3)
try:
# Obtain docker ids
result = subprocess.run(["docker", "compose", "-f", f"{self.config_dir}/docker-compose.yml", "ps", "-q"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
if result.returncode != 0:
raise Exception(f"Error obtaining docker IDs: {result.stderr}")
container_ids = result.stdout.strip().split("\n")
except subprocess.CalledProcessError as e:
raise Exception("Docker Compose failed to start, please check if Docker Compose is installed " "(https://docs.docker.com/compose/install/) and Docker Engine is running.")
# Change log and config directory in dockers to /nebula/app, and change controller endpoint
for idx, node in enumerate(self.config.participants):
# Assign docker ID to node
node["device_args"]["docker_id"] = container_ids[idx]
# Write the config file in config directory
with open(f"{self.config_dir}/participant_{node['device_args']['idx']}.json", "w") as f:
json.dump(node, f, indent=4)
def start_nodes_process(self):
logging.info("Starting nodes as processes...")
logging.info("env path: {}".format(self.env_path))
# Include additional config to the participants
for idx, node in enumerate(self.config.participants):
node["tracking_args"]["log_dir"] = os.path.join(self.root_path, "app", "logs")
node["tracking_args"]["config_dir"] = os.path.join(self.root_path, "app", "config", self.scenario_name)
node["scenario_args"]["controller"] = self.controller
node["security_args"]["certfile"] = os.path.join(self.root_path, "app", "certs", f"participant_{node['device_args']['idx']}_cert.pem")
node["security_args"]["keyfile"] = os.path.join(self.root_path, "app", "certs", f"participant_{node['device_args']['idx']}_key.pem")
node["security_args"]["cafile"] = os.path.join(self.root_path, "app", "certs", "ca_cert.pem")
# Write the config file in config directory
with open(f"{self.config_dir}/participant_{node['device_args']['idx']}.json", "w") as f:
json.dump(node, f, indent=4)
try:
if self.host_platform == "windows":
commands = f"""
$ParentDir = Split-Path -Parent $PSScriptRoot
$PID_FILE = "$PSScriptRoot\\current_scenario_pids.txt"
New-Item -Path $PID_FILE -Force -ItemType File
"""
sorted_participants = sorted(self.config.participants, key=lambda node: node["device_args"]["idx"], reverse=True)
for node in sorted_participants:
if node["device_args"]["start"]:
commands += f"Start-Sleep -Seconds 10\n"
else:
commands += f"Start-Sleep -Seconds 2\n"
commands += f'Write-Host "Running node {node["device_args"]["idx"]}..."\n'
commands += f'$OUT_FILE = "{self.root_path}\\app\\logs\\{self.scenario_name}\\participant_{node["device_args"]["idx"]}.out"\n'
commands += f'$ERROR_FILE = "{self.root_path}\\app\\logs\\{self.scenario_name}\\participant_{node["device_args"]["idx"]}.err"\n'
# Use Start-Process for executing Python in background and capture PID
commands += f"""$process = Start-Process -FilePath "python" -ArgumentList "{self.root_path}\\nebula\\node.py {self.root_path}\\app\\config\\{self.scenario_name}\\participant_{node["device_args"]["idx"]}.json" -PassThru -NoNewWindow -RedirectStandardOutput $OUT_FILE -RedirectStandardError $ERROR_FILE
Add-Content -Path $PID_FILE -Value $process.Id
"""
commands += 'Write-Host "All nodes started. PIDs stored in $PID_FILE"\n'
with open(f"/nebula/app/config/current_scenario_commands.ps1", "w") as f:
f.write(commands)
os.chmod(f"/nebula/app/config/current_scenario_commands.ps1", 0o755)
else:
commands = f'#!/bin/bash\n\nPID_FILE="$(dirname "$0")/current_scenario_pids.txt"\n\n> $PID_FILE\n\n'
sorted_participants = sorted(self.config.participants, key=lambda node: node["device_args"]["idx"], reverse=True)
for node in sorted_participants:
if node["device_args"]["start"]:
commands += f"sleep 10\n"
else:
commands += f"sleep 2\n"
commands += f"echo \"Running node {node['device_args']['idx']}...\"\n"
commands += f"OUT_FILE={self.root_path}/app/logs/{self.scenario_name}/participant_{node['device_args']['idx']}.out\n"
commands += f"python3.11 {self.root_path}/nebula/node.py {self.root_path}/app/config/{self.scenario_name}/participant_{node['device_args']['idx']}.json > $OUT_FILE 2>&1 &\n"
commands += f"echo $! >> $PID_FILE\n\n"
commands += 'echo "All nodes started. PIDs stored in $PID_FILE"\n'
with open(f"/nebula/app/config/current_scenario_commands.sh", "w") as f:
f.write(commands)
os.chmod(f"/nebula/app/config/current_scenario_commands.sh", 0o755)
except Exception as e:
raise Exception("Error starting nodes as processes: {}".format(e))
@classmethod
def remove_files_by_scenario(cls, scenario_name):
try:
shutil.rmtree(Utils.check_path(os.environ["NEBULA_CONFIG_DIR"], scenario_name))
except FileNotFoundError:
logging.warning("Files not found, nothing to remove")
except Exception as e:
logging.error("Unknown error while removing files")
logging.error(e)
raise e
try:
shutil.rmtree(Utils.check_path(os.environ["NEBULA_LOGS_DIR"], scenario_name))
except PermissionError:
# Avoid error if the user does not have enough permissions to remove the tf.events files
logging.warning("Not enough permissions to remove the files, moving them to tmp folder")
os.makedirs(
Utils.check_path(os.environ["NEBULA_ROOT"], os.path.join("app", "tmp", scenario_name)),
exist_ok=True,
)
os.chmod(
Utils.check_path(os.environ["NEBULA_ROOT"], os.path.join("app", "tmp", scenario_name)),
0o777,
)
shutil.move(
Utils.check_path(os.environ["NEBULA_LOGS_DIR"], scenario_name),
Utils.check_path(os.environ["NEBULA_ROOT"], os.path.join("app", "tmp", scenario_name))
)
except FileNotFoundError:
logging.warning("Files not found, nothing to remove")
except Exception as e:
logging.error("Unknown error while removing files")
logging.error(e)
raise e
def scenario_finished(self, timeout_seconds):
client = docker.from_env()
all_containers = client.containers.list(all=True)
containers = [container for container in all_containers if self.scenario_name.lower() in container.name.lower()]
start_time = datetime.now()
while True:
all_containers_finished = True
for container in containers:
container.reload()
if container.status != "exited":
all_containers_finished = False
break
if all_containers_finished:
return True
current_time = datetime.now()
elapsed_time = current_time - start_time
if elapsed_time.total_seconds() >= timeout_seconds:
for container in containers:
container.stop()
return False
time.sleep(5)
| 44,958 | Python | .py | 875 | 38.721143 | 320 | 0.595132 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,927 | controller.py | enriquetomasmb_nebula/nebula/controller.py | import logging
import os
import re
import signal
import subprocess
import sys
import textwrap
import threading
import time
from dotenv import load_dotenv
import psutil
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
from nebula.addons.env import check_environment
from nebula.config.config import Config
from nebula.config.mender import Mender
from nebula import __version__
from nebula.scenarios import ScenarioManagement
from nebula.tests import main as deploy_tests
# Setup controller logger
class TermEscapeCodeFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None, style="%", validate=True):
super().__init__(fmt, datefmt, style, validate)
def format(self, record):
escape_re = re.compile(r"\x1b\[[0-9;]*m")
record.msg = re.sub(escape_re, "", str(record.msg))
return super().format(record)
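# Illustrative example (not part of the original module): the escape regex strips ANSI
# color codes, e.g. re.sub(r"\x1b\[[0-9;]*m", "", "\x1b[0;36mNEBULA\x1b[0m") yields "NEBULA".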
log_console_format = "[%(levelname)s] - %(asctime)s - Controller - %(message)s"
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
# console_handler.setFormatter(logging.Formatter(log_console_format))
console_handler.setFormatter(TermEscapeCodeFormatter(log_console_format))
logging.basicConfig(
level=logging.DEBUG,
handlers=[
console_handler,
],
)
# Detect ctrl+c and run killports
def signal_handler(sig, frame):
Controller.stop()
sys.exit(0)
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
class NebulaEventHandler(PatternMatchingEventHandler):
"""
    NebulaEventHandler handles file system events for .sh and .ps1 scripts.
    This class monitors the creation, modification, and deletion of such scripts
    in a specified directory.
"""
patterns = ["*.sh", "*.ps1"]
def __init__(self):
super(NebulaEventHandler, self).__init__()
self.last_processed = {}
self.timeout_ns = 5 * 1e9
self.processing_files = set()
self.lock = threading.Lock()
def _should_process_event(self, src_path: str) -> bool:
current_time_ns = time.time_ns()
logging.info(f"Current time (ns): {current_time_ns}")
with self.lock:
if src_path in self.last_processed:
logging.info(f"Last processed time for {src_path}: {self.last_processed[src_path]}")
last_time = self.last_processed[src_path]
if current_time_ns - last_time < self.timeout_ns:
return False
self.last_processed[src_path] = current_time_ns
return True
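    # Descriptive note (added for clarity): timeout_ns = 5 * 1e9 ns is a 5-second
    # debounce window, so another event for the same path within 5 seconds of the
    # last handled one is ignored.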
def _is_being_processed(self, src_path: str) -> bool:
with self.lock:
if src_path in self.processing_files:
logging.info(f"Skipping {src_path} as it is already being processed.")
return True
self.processing_files.add(src_path)
return False
def _processing_done(self, src_path: str):
with self.lock:
if src_path in self.processing_files:
self.processing_files.remove(src_path)
def on_created(self, event):
"""
Handles the event when a file is created.
"""
if event.is_directory:
return
src_path = event.src_path
if not self._should_process_event(src_path):
return
if self._is_being_processed(src_path):
return
logging.info("File created: %s" % src_path)
try:
self.run_script(src_path)
finally:
self._processing_done(src_path)
def on_deleted(self, event):
"""
Handles the event when a file is deleted.
"""
if event.is_directory:
return
src_path = event.src_path
if not self._should_process_event(src_path):
return
if self._is_being_processed(src_path):
return
logging.info("File deleted: %s" % src_path)
directory_script = os.path.dirname(src_path)
pids_file = os.path.join(directory_script, "current_scenario_pids.txt")
logging.info(f"Killing processes from {pids_file}")
try:
self.kill_script_processes(pids_file)
os.remove(pids_file)
except FileNotFoundError:
logging.warning(f"{pids_file} not found.")
except Exception as e:
logging.error(f"Error while killing processes: {e}")
finally:
self._processing_done(src_path)
def run_script(self, script):
try:
logging.info("Running script: {}".format(script))
if script.endswith(".sh"):
result = subprocess.run(["bash", script], capture_output=True, text=True)
logging.info("Script output:\n{}".format(result.stdout))
if result.stderr:
logging.error("Script error:\n{}".format(result.stderr))
elif script.endswith(".ps1"):
subprocess.Popen(["powershell", "-ExecutionPolicy", "Bypass", "-File", script], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=False)
else:
logging.error("Unsupported script format.")
return
except Exception as e:
logging.error("Error while running script: {}".format(e))
def kill_script_processes(self, pids_file):
try:
with open(pids_file, "r") as f:
pids = f.readlines()
for pid in pids:
try:
pid = int(pid.strip())
if psutil.pid_exists(pid):
process = psutil.Process(pid)
children = process.children(recursive=True)
logging.info(f"Forcibly killing process {pid} and {len(children)} child processes...")
for child in children:
try:
logging.info(f"Forcibly killing child process {child.pid}")
child.kill()
except psutil.NoSuchProcess:
logging.warning(f"Child process {child.pid} already terminated.")
except Exception as e:
logging.error(f"Error while forcibly killing child process {child.pid}: {e}")
try:
logging.info(f"Forcibly killing main process {pid}")
process.kill()
except psutil.NoSuchProcess:
logging.warning(f"Process {pid} already terminated.")
except Exception as e:
logging.error(f"Error while forcibly killing main process {pid}: {e}")
else:
logging.warning(f"PID {pid} does not exist.")
except ValueError:
logging.error(f"Invalid PID value in file: {pid}")
except Exception as e:
logging.error(f"Error while forcibly killing process {pid}: {e}")
except FileNotFoundError:
logging.error(f"PID file not found: {pids_file}")
except Exception as e:
logging.error(f"Error while reading PIDs from file: {e}")
class Controller:
def __init__(self, args):
self.scenario_name = args.scenario_name if hasattr(args, "scenario_name") else None
self.start_date_scenario = None
self.federation = args.federation if hasattr(args, "federation") else None
self.topology = args.topology if hasattr(args, "topology") else None
self.waf_port = args.wafport if hasattr(args, "wafport") else 6000
self.frontend_port = args.webport if hasattr(args, "webport") else 6060
self.grafana_port = args.grafanaport if hasattr(args, "grafanaport") else 6040
self.loki_port = args.lokiport if hasattr(args, "lokiport") else 6010
self.statistics_port = args.statsport if hasattr(args, "statsport") else 8080
self.simulation = args.simulation
self.config_dir = args.config
self.test = args.test if hasattr(args, "test") else False
self.log_dir = args.logs
self.cert_dir = args.certs
self.env_path = args.env
self.production = args.production if hasattr(args, "production") else False
self.advanced_analytics = args.advanced_analytics if hasattr(args, "advanced_analytics") else False
self.matrix = args.matrix if hasattr(args, "matrix") else None
self.root_path = args.root_path if hasattr(args, "root_path") else os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
self.host_platform = "windows" if sys.platform == "win32" else "unix"
# Network configuration (nodes deployment in a network)
self.network_subnet = args.network_subnet if hasattr(args, "network_subnet") else None
self.network_gateway = args.network_gateway if hasattr(args, "network_gateway") else None
self.config = Config(entity="controller")
self.topologymanager = None
self.n_nodes = 0
self.mender = None if self.simulation else Mender()
self.use_blockchain = args.use_blockchain if hasattr(args, "use_blockchain") else False
self.gpu_available = False
def start(self):
banner = """
███╗ ██╗███████╗██████╗ ██╗ ██╗██╗ █████╗
████╗ ██║██╔════╝██╔══██╗██║ ██║██║ ██╔══██╗
██╔██╗ ██║█████╗ ██████╔╝██║ ██║██║ ███████║
██║╚██╗██║██╔══╝ ██╔══██╗██║ ██║██║ ██╔══██║
██║ ╚████║███████╗██████╔╝╚██████╔╝███████╗██║ ██║
╚═╝ ╚═══╝╚══════╝╚═════╝ ╚═════╝ ╚══════╝╚═╝ ╚═╝
A Platform for Decentralized Federated Learning
Created by Enrique Tomás Martínez Beltrán
https://github.com/enriquetomasmb/nebula
"""
print("\x1b[0;36m" + banner + "\x1b[0m")
# Load the environment variables
load_dotenv(self.env_path)
# Save controller pid
with open(os.path.join(os.path.dirname(__file__), "controller.pid"), "w") as f:
f.write(str(os.getpid()))
# Check information about the environment
check_environment()
# Save the configuration in environment variables
logging.info("Saving configuration in environment variables...")
os.environ["NEBULA_ROOT"] = self.root_path
os.environ["NEBULA_LOGS_DIR"] = self.log_dir
os.environ["NEBULA_CONFIG_DIR"] = self.config_dir
os.environ["NEBULA_CERTS_DIR"] = self.cert_dir
os.environ["NEBULA_STATISTICS_PORT"] = str(self.statistics_port)
os.environ["NEBULA_ROOT_HOST"] = self.root_path
os.environ["NEBULA_HOST_PLATFORM"] = self.host_platform
if self.production:
self.run_waf()
logging.info("NEBULA WAF is running at port {}".format(self.waf_port))
logging.info("Grafana Dashboard is running at port {}".format(self.grafana_port))
if self.test:
self.run_test()
else:
self.run_frontend()
logging.info("NEBULA Frontend is running at port {}".format(self.frontend_port))
        # Watchdog for running additional scripts on the host machine (e.g. during the execution of a federation)
event_handler = NebulaEventHandler()
observer = Observer()
observer.schedule(event_handler, path=self.config_dir, recursive=False)
observer.start()
if self.mender:
logging.info("[Mender.module] Mender module initialized")
time.sleep(2)
mender = Mender()
logging.info("[Mender.module] Getting token from Mender server: {}".format(os.getenv("MENDER_SERVER")))
mender.renew_token()
time.sleep(2)
logging.info("[Mender.module] Getting devices from {} with group Cluster_Thun".format(os.getenv("MENDER_SERVER")))
time.sleep(2)
devices = mender.get_devices_by_group("Cluster_Thun")
logging.info("[Mender.module] Getting a pool of devices: 5 devices")
# devices = devices[:5]
for i in self.config.participants:
logging.info("[Mender.module] Device {} | IP: {}".format(i["device_args"]["idx"], i["network_args"]["ip"]))
logging.info("[Mender.module] \tCreating artifacts...")
logging.info("[Mender.module] \tSending NEBULA Core...")
# mender.deploy_artifact_device("my-update-2.0.mender", i['device_args']['idx'])
logging.info("[Mender.module] \tSending configuration...")
time.sleep(5)
sys.exit(0)
        logging.info("Press Ctrl+C to exit NEBULA (global exit)")
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
logging.info("Closing NEBULA (exiting from components)... Please wait")
observer.stop()
self.stop()
observer.join()
def run_waf(self):
docker_compose_template = textwrap.dedent(
"""
services:
{}
"""
)
waf_template = textwrap.dedent(
"""
nebula-waf:
container_name: nebula-waf
image: nebula-waf
build:
context: .
dockerfile: Dockerfile-waf
restart: unless-stopped
volumes:
- {log_path}/waf/nginx:/var/log/nginx
extra_hosts:
- "host.docker.internal:host-gateway"
ipc: host
privileged: true
ports:
- {waf_port}:80
networks:
nebula-net-base:
ipv4_address: {ip}
"""
)
grafana_template = textwrap.dedent(
"""
grafana:
container_name: nebula-waf-grafana
image: nebula-waf-grafana
build:
context: .
dockerfile: Dockerfile-grafana
restart: unless-stopped
environment:
- GF_SECURITY_ADMIN_PASSWORD=admin
- GF_USERS_ALLOW_SIGN_UP=false
- GF_SERVER_HTTP_PORT=3000
- GF_SERVER_PROTOCOL=http
- GF_SERVER_DOMAIN=localhost:{grafana_port}
- GF_SERVER_ROOT_URL=http://localhost:{grafana_port}/grafana/
- GF_SERVER_SERVE_FROM_SUB_PATH=true
- GF_DASHBOARDS_DEFAULT_HOME_DASHBOARD_PATH=/var/lib/grafana/dashboards/dashboard.json
- GF_METRICS_MAX_LIMIT_TSDB=0
ports:
- {grafana_port}:3000
ipc: host
privileged: true
networks:
nebula-net-base:
ipv4_address: {ip}
"""
)
loki_template = textwrap.dedent(
"""
loki:
container_name: nebula-waf-loki
image: nebula-waf-loki
build:
context: .
dockerfile: Dockerfile-loki
restart: unless-stopped
volumes:
- ./loki-config.yml:/mnt/config/loki-config.yml
ports:
- {loki_port}:3100
user: "0:0"
command:
- '-config.file=/mnt/config/loki-config.yml'
networks:
nebula-net-base:
ipv4_address: {ip}
"""
)
promtail_template = textwrap.dedent(
"""
promtail:
container_name: nebula-waf-promtail
image: nebula-waf-promtail
build:
context: .
dockerfile: Dockerfile-promtail
restart: unless-stopped
volumes:
- {log_path}/waf/nginx:/var/log/nginx
- ./promtail-config.yml:/etc/promtail/config.yml
command:
- '-config.file=/etc/promtail/config.yml'
networks:
nebula-net-base:
ipv4_address: {ip}
"""
)
waf_template = textwrap.indent(waf_template, " " * 4)
grafana_template = textwrap.indent(grafana_template, " " * 4)
loki_template = textwrap.indent(loki_template, " " * 4)
promtail_template = textwrap.indent(promtail_template, " " * 4)
network_template = textwrap.dedent(
"""
networks:
nebula-net-base:
name: nebula-net-base
driver: bridge
ipam:
config:
- subnet: {}
gateway: {}
"""
)
# Generate the Docker Compose file dynamically
services = ""
services += waf_template.format(path=self.root_path, log_path=os.environ["NEBULA_LOGS_DIR"], waf_port=self.waf_port, gw="192.168.10.1", ip="192.168.10.200")
services += grafana_template.format(log_path=os.environ["NEBULA_LOGS_DIR"], grafana_port=self.grafana_port, loki_port=self.loki_port, ip="192.168.10.201")
services += loki_template.format(loki_port=self.loki_port, ip="192.168.10.202")
services += promtail_template.format(log_path=os.environ["NEBULA_LOGS_DIR"], ip="192.168.10.203")
docker_compose_file = docker_compose_template.format(services)
docker_compose_file += network_template.format("192.168.10.0/24", "192.168.10.1")
# Write the Docker Compose file in waf directory
with open(
f"{os.path.join(os.environ['NEBULA_ROOT'], 'nebula', 'addons', 'waf', 'docker-compose.yml')}",
"w",
) as f:
f.write(docker_compose_file)
# Start the Docker Compose file, catch error if any
try:
subprocess.check_call(
[
"docker",
"compose",
"-f",
f"{os.path.join(os.environ['NEBULA_ROOT'], 'nebula', 'addons', 'waf', 'docker-compose.yml')}",
"up",
"--build",
"-d",
]
)
except subprocess.CalledProcessError as e:
raise Exception("Docker Compose failed to start, please check if Docker Compose is installed (https://docs.docker.com/compose/install/) and Docker Engine is running.")
def run_frontend(self):
if sys.platform == "win32":
if not os.path.exists("//./pipe/docker_Engine"):
raise Exception("Docker is not running, please check if Docker is running and Docker Compose is installed.")
else:
if not os.path.exists("/var/run/docker.sock"):
raise Exception("/var/run/docker.sock not found, please check if Docker is running and Docker Compose is installed.")
docker_compose_template = textwrap.dedent(
"""
services:
{}
"""
)
frontend_template = textwrap.dedent(
"""
nebula-frontend:
container_name: nebula-frontend
image: nebula-frontend
build:
context: .
restart: unless-stopped
volumes:
- {path}:/nebula
- /var/run/docker.sock:/var/run/docker.sock
- ./config/nebula:/etc/nginx/sites-available/default
environment:
- NEBULA_PRODUCTION={production}
- NEBULA_GPU_AVAILABLE={gpu_available}
- NEBULA_ADVANCED_ANALYTICS={advanced_analytics}
- SERVER_LOG=/nebula/app/logs/server.log
- NEBULA_LOGS_DIR=/nebula/app/logs/
- NEBULA_CONFIG_DIR=/nebula/app/config/
- NEBULA_CERTS_DIR=/nebula/app/certs/
- NEBULA_ENV_PATH=/nebula/app/.env
- NEBULA_ROOT_HOST={path}
- NEBULA_HOST_PLATFORM={platform}
- NEBULA_DEFAULT_USER=admin
- NEBULA_DEFAULT_PASSWORD=admin
- NEBULA_FRONTEND_PORT={frontend_port}
extra_hosts:
- "host.docker.internal:host-gateway"
ipc: host
privileged: true
ports:
- {frontend_port}:80
- {statistics_port}:8080
networks:
nebula-net-base:
ipv4_address: {ip}
"""
)
frontend_template = textwrap.indent(frontend_template, " " * 4)
network_template = textwrap.dedent(
"""
networks:
nebula-net-base:
name: nebula-net-base
driver: bridge
ipam:
config:
- subnet: {}
gateway: {}
"""
)
network_template_external = textwrap.dedent(
"""
networks:
nebula-net-base:
external: true
"""
)
try:
subprocess.check_call(["nvidia-smi"])
self.gpu_available = True
except Exception as e:
            logging.info("No GPU available for the frontend, nodes will be deployed in CPU mode")
# Generate the Docker Compose file dynamically
services = ""
services += frontend_template.format(production=self.production, gpu_available=self.gpu_available, advanced_analytics=self.advanced_analytics, path=self.root_path, platform=self.host_platform, gw="192.168.10.1", ip="192.168.10.100", frontend_port=self.frontend_port, statistics_port=self.statistics_port)
docker_compose_file = docker_compose_template.format(services)
if self.production:
# If WAF is enabled, we need to use the same network
docker_compose_file += network_template_external
else:
docker_compose_file += network_template.format("192.168.10.0/24", "192.168.10.1")
# Write the Docker Compose file in config directory
with open(
f"{os.path.join(os.environ['NEBULA_ROOT'], 'nebula', 'frontend', 'docker-compose.yml')}",
"w",
) as f:
f.write(docker_compose_file)
# Start the Docker Compose file, catch error if any
try:
subprocess.check_call(
[
"docker",
"compose",
"-f",
f"{os.path.join(os.environ['NEBULA_ROOT'], 'nebula', 'frontend', 'docker-compose.yml')}",
"up",
"--build",
"-d",
]
)
except subprocess.CalledProcessError as e:
raise Exception("Docker Compose failed to start, please check if Docker Compose is installed (https://docs.docker.com/compose/install/) and Docker Engine is running.")
except Exception as e:
raise Exception("Error while starting the frontend: {}".format(e))
def run_test(self):
deploy_tests.start()
@staticmethod
def stop_frontend():
if sys.platform == "win32":
try:
                # kill all the docker containers based on the nebula-frontend image
commands = [
"""docker kill $(docker ps -q --filter ancestor=nebula-frontend) | Out-Null""",
"""docker rm $(docker ps -a -q --filter ancestor=nebula-frontend) | Out-Null""",
]
for command in commands:
time.sleep(1)
exit_code = os.system(f'powershell.exe -Command "{command}"')
# logging.info(f"Windows Command '{command}' executed with exit code: {exit_code}")
except Exception as e:
raise Exception("Error while killing docker containers: {}".format(e))
else:
try:
commands = [
"""docker kill $(docker ps -q --filter ancestor=nebula-frontend) > /dev/null 2>&1""",
"""docker rm $(docker ps -a -q --filter ancestor=nebula-frontend) > /dev/null 2>&1""",
]
for command in commands:
time.sleep(1)
exit_code = os.system(command)
# logging.info(f"Linux Command '{command}' executed with exit code: {exit_code}")
except Exception as e:
raise Exception("Error while killing docker containers: {}".format(e))
@staticmethod
def stop_network():
if sys.platform == "win32":
try:
                # remove the nebula-net-base docker network
commands = ["""docker network rm $(docker network ls | Where-Object { ($_ -split '\s+')[1] -like 'nebula-net-base' } | ForEach-Object { ($_ -split '\s+')[0] }) | Out-Null"""]
for command in commands:
time.sleep(1)
exit_code = os.system(f'powershell.exe -Command "{command}"')
# logging.info(f"Windows Command '{command}' executed with exit code: {exit_code}")
except Exception as e:
raise Exception("Error while killing docker containers: {}".format(e))
else:
try:
commands = ["""docker network rm $(docker network ls | grep nebula-net-base | awk '{print $1}') > /dev/null 2>&1"""]
for command in commands:
time.sleep(1)
exit_code = os.system(command)
# logging.info(f"Linux Command '{command}' executed with exit code: {exit_code}")
except Exception as e:
raise Exception("Error while killing docker containers: {}".format(e))
@staticmethod
def stop_waf():
if sys.platform == "win32":
try:
                # stop and remove the WAF docker compose project
commands = [
"""docker compose -p waf down | Out-Null""",
"""docker compose -p waf rm | Out-Null""",
]
for command in commands:
time.sleep(1)
exit_code = os.system(f'powershell.exe -Command "{command}"')
# logging.info(f"Windows Command '{command}' executed with exit code: {exit_code}")
except Exception as e:
raise Exception("Error while killing docker containers: {}".format(e))
else:
try:
commands = [
"""docker compose -p waf down > /dev/null 2>&1""",
"""docker compose -p waf rm > /dev/null 2>&1""",
]
for command in commands:
time.sleep(1)
exit_code = os.system(command)
# logging.info(f"Linux Command '{command}' executed with exit code: {exit_code}")
except Exception as e:
raise Exception("Error while killing docker containers: {}".format(e))
@staticmethod
def stop():
logging.info("Closing NEBULA (exiting from components)... Please wait")
ScenarioManagement.stop_participants()
ScenarioManagement.stop_blockchain()
Controller.stop_frontend()
Controller.stop_waf()
Controller.stop_network()
controller_pid_file = os.path.join(os.path.dirname(__file__), "controller.pid")
try:
with open(controller_pid_file, "r") as f:
pid = int(f.read())
os.kill(pid, signal.SIGKILL)
os.remove(controller_pid_file)
except Exception as e:
logging.error(f"Error while killing controller process: {e}")
sys.exit(0)
# ---- File: enriquetomasmb_nebula/nebula/__init__.py (repo: enriquetomasmb/nebula, GPL-3.0) ----
__version__ = "1.0.0"
__description__ = "NEBULA: A Platform for Decentralized Federated Learning"
__long_description__ = "NEBULA: A Platform for Decentralized Federated Learning"
__author__ = "Enrique Tomás Martínez Beltrán"
__long_description_content_type__ = "text/markdown"
__keywords__ = "federated learning, decentralized federated learning, machine learning, deep learning, neural networks, collaborative learning"
# ---- File: enriquetomasmb_nebula/nebula/node.py (repo: enriquetomasmb/nebula, GPL-3.0) ----
import os
import sys
import time
import random
import warnings
import numpy as np
import torch
torch.multiprocessing.set_start_method("spawn", force=True)
# Ignore CryptographyDeprecationWarning (datatime issues with cryptography library)
from cryptography.utils import CryptographyDeprecationWarning
warnings.filterwarnings("ignore", category=CryptographyDeprecationWarning)
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from nebula.config.config import Config
import logging
from nebula.core.datasets.mnist.mnist import MNISTDataset
from nebula.core.datasets.fashionmnist.fashionmnist import FashionMNISTDataset
from nebula.core.datasets.syscall.syscall import SYSCALLDataset
from nebula.core.datasets.cifar10.cifar10 import CIFAR10Dataset
from nebula.core.datasets.militarysar.militarysar import MilitarySARDataset
from nebula.core.datasets.datamodule import DataModule
from nebula.core.training.lightning import Lightning
from nebula.core.training.siamese import Siamese
from nebula.core.models.cifar10.dualagg import DualAggModel
from nebula.core.models.mnist.mlp import MNISTModelMLP
from nebula.core.models.mnist.cnn import MNISTModelCNN
from nebula.core.models.fashionmnist.mlp import FashionMNISTModelMLP
from nebula.core.models.fashionmnist.cnn import FashionMNISTModelCNN
from nebula.core.models.syscall.mlp import SyscallModelMLP
from nebula.core.models.syscall.autoencoder import SyscallModelAutoencoder
from nebula.core.models.cifar10.resnet import CIFAR10ModelResNet
from nebula.core.models.cifar10.fastermobilenet import FasterMobileNet
from nebula.core.models.cifar10.simplemobilenet import SimpleMobileNetV1
from nebula.core.models.cifar10.cnn import CIFAR10ModelCNN
from nebula.core.models.cifar10.cnnV2 import CIFAR10ModelCNN_V2
from nebula.core.models.cifar10.cnnV3 import CIFAR10ModelCNN_V3
from nebula.core.models.militarysar.cnn import MilitarySARModelCNN
from nebula.core.datasets.cifar100.cifar100 import CIFAR100Dataset
from nebula.core.datasets.emnist.emnist import EMNISTDataset
from nebula.core.datasets.kitsun.kitsun import KITSUNDataset
from nebula.core.models.cifar100.cnn import CIFAR100ModelCNN
from nebula.core.models.emnist.cnn import EMNISTModelCNN
from nebula.core.models.emnist.mlp import EMNISTModelMLP
from nebula.core.models.kitsun.mlp import KitsunModelMLP
from nebula.core.models.syscall.svm import SyscallModelSGDOneClassSVM
from nebula.core.engine import MaliciousNode, AggregatorNode, TrainerNode, ServerNode, IdleNode
from nebula.core.role import Role
# os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
# os.environ["TORCH_LOGS"] = "+dynamo"
# os.environ["TORCHDYNAMO_VERBOSE"] = "1"
async def main(config):
n_nodes = config.participant["scenario_args"]["n_nodes"]
model_name = config.participant["model_args"]["model"]
idx = config.participant["device_args"]["idx"]
additional_node_status = config.participant["mobility_args"]["additional_node"]["status"]
additional_node_round = config.participant["mobility_args"]["additional_node"]["round_start"]
attacks = config.participant["adversarial_args"]["attacks"]
poisoned_persent = config.participant["adversarial_args"]["poisoned_sample_percent"]
poisoned_ratio = config.participant["adversarial_args"]["poisoned_ratio"]
targeted = str(config.participant["adversarial_args"]["targeted"])
target_label = config.participant["adversarial_args"]["target_label"]
target_changed_label = config.participant["adversarial_args"]["target_changed_label"]
noise_type = config.participant["adversarial_args"]["noise_type"]
iid = config.participant["data_args"]["iid"]
partition_selection = config.participant["data_args"]["partition_selection"]
partition_parameter = np.array(config.participant["data_args"]["partition_parameter"], dtype=np.float64)
label_flipping = False
data_poisoning = False
model_poisoning = False
if attacks == "Label Flipping":
label_flipping = True
poisoned_ratio = 0
if targeted == "true" or targeted == "True":
targeted = True
else:
targeted = False
elif attacks == "Sample Poisoning":
data_poisoning = True
if targeted == "true" or targeted == "True":
targeted = True
else:
targeted = False
elif attacks == "Model Poisoning":
model_poisoning = True
else:
label_flipping = False
data_poisoning = False
targeted = False
poisoned_persent = 0
poisoned_ratio = 0
# Adjust the total number of nodes and the index of the current node for CFL, as it doesn't require a specific partition for the server (not used for training)
if config.participant["scenario_args"]["federation"] == "CFL":
n_nodes -= 1
if idx > 0:
idx -= 1
dataset = None
dataset_str = config.participant["data_args"]["dataset"]
num_workers = config.participant["data_args"]["num_workers"]
model = None
if dataset_str == "MNIST":
dataset = MNISTDataset(num_classes=10, partition_id=idx, partitions_number=n_nodes, iid=iid, partition=partition_selection, partition_parameter=partition_parameter, seed=42, config=config)
if model_name == "MLP":
model = MNISTModelMLP()
elif model_name == "CNN":
model = MNISTModelCNN()
else:
raise ValueError(f"Model {model} not supported for dataset {dataset_str}")
elif dataset_str == "FashionMNIST":
dataset = FashionMNISTDataset(num_classes=10, partition_id=idx, partitions_number=n_nodes, iid=iid, partition=partition_selection, partition_parameter=partition_parameter, seed=42, config=config)
if model_name == "MLP":
model = FashionMNISTModelMLP()
elif model_name == "CNN":
model = FashionMNISTModelCNN()
else:
raise ValueError(f"Model {model} not supported for dataset {dataset_str}")
elif dataset_str == "EMNIST":
dataset = EMNISTDataset(num_classes=10, partition_id=idx, partitions_number=n_nodes, iid=iid, partition=partition_selection, partition_parameter=partition_parameter, seed=42, config=config)
if model_name == "MLP":
model = EMNISTModelMLP()
elif model_name == "CNN":
model = EMNISTModelCNN()
else:
raise ValueError(f"Model {model} not supported for dataset {dataset_str}")
elif dataset_str == "SYSCALL":
dataset = SYSCALLDataset(num_classes=10, partition_id=idx, partitions_number=n_nodes, iid=iid, partition=partition_selection, partition_parameter=partition_parameter, seed=42, config=config)
if model_name == "MLP":
model = SyscallModelMLP()
elif model_name == "SVM":
model = SyscallModelSGDOneClassSVM()
elif model_name == "Autoencoder":
model = SyscallModelAutoencoder()
else:
raise ValueError(f"Model {model} not supported for dataset {dataset_str}")
elif dataset_str == "CIFAR10":
dataset = CIFAR10Dataset(num_classes=10, partition_id=idx, partitions_number=n_nodes, iid=iid, partition=partition_selection, partition_parameter=partition_parameter, seed=42, config=config)
if model_name == "ResNet9":
model = CIFAR10ModelResNet(classifier="resnet9")
elif model_name == "fastermobilenet":
model = FasterMobileNet()
elif model_name == "simplemobilenet":
model = SimpleMobileNetV1()
elif model_name == "CNN":
model = CIFAR10ModelCNN()
elif model_name == "CNNv2":
model = CIFAR10ModelCNN_V2()
elif model_name == "CNNv3":
model = CIFAR10ModelCNN_V3()
else:
raise ValueError(f"Model {model} not supported for dataset {dataset_str}")
elif dataset_str == "CIFAR100":
dataset = CIFAR100Dataset(num_classes=100, partition_id=idx, partitions_number=n_nodes, iid=iid, partition=partition_selection, partition_parameter=partition_parameter, seed=42, config=config)
if model_name == "CNN":
model = CIFAR100ModelCNN()
else:
raise ValueError(f"Model {model} not supported for dataset {dataset_str}")
elif dataset_str == "KITSUN":
dataset = KITSUNDataset(num_classes=10, partition_id=idx, partitions_number=n_nodes, iid=iid, partition=partition_selection, partition_parameter=partition_parameter, seed=42, config=config)
if model_name == "MLP":
model = KitsunModelMLP()
else:
raise ValueError(f"Model {model} not supported for dataset {dataset_str}")
elif dataset_str == "MilitarySAR":
dataset = MilitarySARDataset(num_classes=10, partition_id=idx, partitions_number=n_nodes, iid=iid, partition=partition_selection, partition_parameter=partition_parameter, seed=42, config=config)
model = MilitarySARModelCNN()
else:
raise ValueError(f"Dataset {dataset_str} not supported")
dataset = DataModule(
train_set=dataset.train_set,
train_set_indices=dataset.train_indices_map,
test_set=dataset.test_set,
test_set_indices=dataset.test_indices_map,
local_test_set_indices=dataset.local_test_indices_map,
num_workers=num_workers,
partition_id=idx,
partitions_number=n_nodes,
batch_size=dataset.batch_size,
label_flipping=label_flipping,
data_poisoning=data_poisoning,
poisoned_persent=poisoned_persent,
poisoned_ratio=poisoned_ratio,
targeted=targeted,
target_label=target_label,
target_changed_label=target_changed_label,
noise_type=noise_type,
)
# - Import MNISTDatasetScikit (not torch component)
# - Import scikit-learn model
# - Import ScikitDataModule
# - Import Scikit as trainer
# - Import aggregation algorithm adapted to scikit-learn models (e.g. FedAvgSVM)
trainer = None
trainer_str = config.participant["training_args"]["trainer"]
if trainer_str == "lightning":
trainer = Lightning
elif trainer_str == "scikit":
raise NotImplementedError
elif trainer_str == "siamese" and dataset_str == "CIFAR10":
trainer = Siamese
model = DualAggModel()
config.participant["model_args"]["model"] = "DualAggModel"
config.participant["data_args"]["dataset"] = "CIFAR10"
config.participant["aggregator_args"]["algorithm"] = "DualHistAgg"
else:
raise ValueError(f"Trainer {trainer_str} not supported")
if config.participant["device_args"]["malicious"]:
node_cls = MaliciousNode
else:
if config.participant["device_args"]["role"] == Role.AGGREGATOR:
node_cls = AggregatorNode
elif config.participant["device_args"]["role"] == Role.TRAINER:
node_cls = TrainerNode
elif config.participant["device_args"]["role"] == Role.SERVER:
node_cls = ServerNode
elif config.participant["device_args"]["role"] == Role.IDLE:
node_cls = IdleNode
else:
raise ValueError(f"Role {config.participant['device_args']['role']} not supported")
VARIABILITY = 0.5
def randomize_value(value, variability):
min_value = max(0, value - variability)
max_value = value + variability
return random.uniform(min_value, max_value)
config_keys = [
["reporter_args", "report_frequency"],
["discoverer_args", "discovery_frequency"],
["health_args", "health_interval"],
["health_args", "grace_time_health"],
["health_args", "check_alive_interval"],
["health_args", "send_alive_interval"],
["forwarder_args", "forwarder_interval"],
["forwarder_args", "forward_messages_interval"],
]
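    # Walk each nested config path and replace the final value with a jittered one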
for keys in config_keys:
value = config.participant
for key in keys[:-1]:
value = value[key]
value[keys[-1]] = randomize_value(value[keys[-1]], VARIABILITY)
logging.info(f"Starting node {idx} with model {model_name}, trainer {trainer.__name__}, and as {node_cls.__name__}")
node = node_cls(model=model, dataset=dataset, config=config, trainer=trainer, security=False, model_poisoning=model_poisoning, poisoned_ratio=poisoned_ratio, noise_type=noise_type)
await node.start_communications()
await node.deploy_federation()
# If it is an additional node, it should wait until additional_node_round to connect to the network
# In order to do that, it should request the current round to the controller
if additional_node_status:
logging.info(f"Waiting for round {additional_node_round} to start")
time.sleep(6000) # DEBUG purposes
import requests
url = f'http://{node.config.participant["scenario_args"]["controller"]}/platform/{node.config.participant["scenario_args"]["name"]}/round'
current_round = int(requests.get(url).json()["round"])
        while current_round < additional_node_round:
            logging.info(f"Waiting for round {additional_node_round} to start")
            time.sleep(10)
            current_round = int(requests.get(url).json()["round"])
logging.info(f"Round {additional_node_round} started, connecting to the network")
if node.cm is not None:
await node.cm.network_wait()
if __name__ == "__main__":
config_path = str(sys.argv[1])
config = Config(entity="participant", participant_config_file=config_path)
if sys.platform == "win32" or config.participant["scenario_args"]["deployment"] == "docker":
import asyncio
asyncio.run(main(config), debug=False)
else:
try:
import uvloop
uvloop.run(main(config), debug=False)
except ImportError:
logging.warning("uvloop not available, using default loop")
import asyncio
asyncio.run(main(config), debug=False)
# ---- File: enriquetomasmb_nebula/nebula/config/config.py (repo: enriquetomasmb/nebula, GPL-3.0) ----
import json
import logging
import os
from logging import Formatter, FileHandler
CYAN = "\x1b[0;36m"
RESET = "\x1b[0m"
TRAINING_LOGGER = "nebula.training"
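# Holds the topology and participant configuration and sets up the per-participant console, info, debug, error and training loggers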
class Config:
topology = {}
participant = {}
participants = [] # Configuration of each participant (this information is stored only in the controller)
participants_path = []
def __init__(self, entity, topology_config_file=None, participant_config_file=None):
self.entity = entity
if topology_config_file is not None:
self.set_topology_config(topology_config_file)
if participant_config_file is not None:
self.set_participant_config(participant_config_file)
if self.participant != {}:
self.__default_config()
self.__set_default_logging()
self.__set_training_logging()
def __getstate__(self):
# Return the attributes of the class that should be serialized
return {"topology": self.topology, "participant": self.participant}
def __setstate__(self, state):
# Set the attributes of the class from the serialized state
self.topology = state["topology"]
self.participant = state["participant"]
def get_topology_config(self):
return json.dumps(self.topology, indent=2)
def get_participant_config(self):
return json.dumps(self.participant, indent=2)
def get_train_logging_config(self):
# TBD
pass
def __default_config(self):
self.participant["device_args"]["name"] = f"participant_{self.participant['device_args']['idx']}_{self.participant['network_args']['ip']}_{self.participant['network_args']['port']}"
self.participant["network_args"]["addr"] = f"{self.participant['network_args']['ip']}:{self.participant['network_args']['port']}"
def __set_default_logging(self):
experiment_name = self.participant["scenario_args"]["name"]
self.log_dir = os.path.join(self.participant["tracking_args"]["log_dir"], experiment_name)
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
self.log_filename = f"{self.log_dir}/participant_{self.participant['device_args']['idx']}"
os.makedirs(os.path.dirname(self.log_filename), exist_ok=True)
console_handler, file_handler, file_handler_only_debug, exp_errors_file_handler = self.__setup_logging(self.log_filename)
level = logging.DEBUG if self.participant["device_args"]["logging"] else logging.CRITICAL
logging.basicConfig(level=level, handlers=[console_handler, file_handler, file_handler_only_debug, exp_errors_file_handler])
def __setup_logging(self, log_filename):
info_file_format = f"%(asctime)s - {self.participant['device_args']['name']} - [%(filename)s:%(lineno)d] %(message)s"
debug_file_format = f"%(asctime)s - {self.participant['device_args']['name']} - [%(filename)s:%(lineno)d] %(message)s\n[in %(pathname)s:%(lineno)d]"
log_console_format = f"{CYAN}%(asctime)s - {self.participant['device_args']['name']} - [%(filename)s:%(lineno)d]{RESET}\n%(message)s"
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.CRITICAL)
console_handler.setFormatter(Formatter(log_console_format))
file_handler = FileHandler("{}.log".format(log_filename), mode="w", encoding="utf-8")
file_handler.setLevel(logging.INFO if self.participant["device_args"]["logging"] else logging.CRITICAL)
file_handler.setFormatter(Formatter(info_file_format))
file_handler_only_debug = FileHandler("{}_debug.log".format(log_filename), mode="w", encoding="utf-8")
file_handler_only_debug.setLevel(logging.DEBUG if self.participant["device_args"]["logging"] else logging.CRITICAL)
file_handler_only_debug.addFilter(lambda record: record.levelno == logging.DEBUG)
file_handler_only_debug.setFormatter(Formatter(debug_file_format))
exp_errors_file_handler = FileHandler("{}_error.log".format(log_filename), mode="w", encoding="utf-8")
exp_errors_file_handler.setLevel(logging.WARNING if self.participant["device_args"]["logging"] else logging.CRITICAL)
exp_errors_file_handler.setFormatter(Formatter(debug_file_format))
return console_handler, file_handler, file_handler_only_debug, exp_errors_file_handler
def __set_training_logging(self):
training_log_filename = f"{self.log_filename}_training"
info_file_format = f"%(asctime)s - {self.participant['device_args']['name']} - [%(filename)s:%(lineno)d] %(message)s"
log_console_format = f"{CYAN}%(asctime)s - {self.participant['device_args']['name']} - [%(filename)s:%(lineno)d]{RESET}\n%(message)s"
level = logging.DEBUG if self.participant["device_args"]["logging"] else logging.CRITICAL
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.CRITICAL)
console_handler.setFormatter(Formatter(log_console_format))
file_handler = FileHandler("{}.log".format(training_log_filename), mode="w", encoding="utf-8")
file_handler.setLevel(level)
file_handler.setFormatter(Formatter(info_file_format))
logger = logging.getLogger(TRAINING_LOGGER)
logger.setLevel(level)
logger.addHandler(console_handler)
logger.addHandler(file_handler)
logger.propagate = False
pl_logger = logging.getLogger("lightning.pytorch")
pl_logger.setLevel(logging.INFO)
pl_logger.handlers = []
pl_logger.propagate = False
pl_logger.addHandler(console_handler)
pl_logger.addHandler(file_handler)
def to_json(self):
# Return participant configuration as a json string
return json.dumps(self.participant, sort_keys=False, indent=2)
# Read the configuration file scenario_config.json, and return a dictionary with the configuration
def set_participant_config(self, participant_config):
with open(participant_config) as json_file:
self.participant = json.load(json_file)
def set_topology_config(self, topology_config_file):
with open(topology_config_file) as json_file:
self.topology = json.load(json_file)
def add_participant_config(self, participant_config):
with open(participant_config) as json_file:
self.participants.append(json.load(json_file))
def set_participants_config(self, participants_config):
self.participants = []
self.participants_path = participants_config
for participant in participants_config:
self.add_participant_config(participant)
def add_participants_config(self, participants_config):
self.participants_path = participants_config
for participant in participants_config:
self.add_participant_config(participant)
def add_neighbor_from_config(self, addr):
if self.participant != {}:
if self.participant["network_args"]["neighbors"] == "":
self.participant["network_args"]["neighbors"] = addr
self.participant["mobility_args"]["neighbors_distance"][addr] = None
else:
if addr not in self.participant["network_args"]["neighbors"]:
self.participant["network_args"]["neighbors"] += " " + addr
self.participant["mobility_args"]["neighbors_distance"][addr] = None
def update_neighbors_from_config(self, current_connections, dest_addr):
final_neighbors = []
for n in current_connections:
if n != dest_addr:
final_neighbors.append(n)
final_neighbors_string = " ".join(final_neighbors)
# Update neighbors
self.participant["network_args"]["neighbors"] = final_neighbors_string
# Update neighbors location
self.participant["mobility_args"]["neighbors_distance"] = {n: self.participant["mobility_args"]["neighbors_distance"][n] for n in final_neighbors if n in self.participant["mobility_args"]["neighbors_distance"]}
logging.info(f"Final neighbors: {final_neighbors_string} (config updated))")
def remove_neighbor_from_config(self, addr):
if self.participant != {}:
if self.participant["network_args"]["neighbors"] != "":
self.participant["network_args"]["neighbors"] = self.participant["network_args"]["neighbors"].replace(addr, "").replace(" ", " ").strip()
def reload_config_file(self):
config_dir = self.participant["tracking_args"]["config_dir"]
with open(f"{config_dir}/participant_{self.participant['device_args']['idx']}.json", "w") as f:
f.write(self.to_json())
# ---- File: enriquetomasmb_nebula/nebula/config/mender.py (repo: enriquetomasmb/nebula, GPL-3.0) ----
import json
import os
from typing import List
import requests
import logging
import base64
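# Thin client for the Mender server management API: authentication, artifact generation/upload, deployments and device inventory queries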
class Mender:
def __init__(self):
self.server = os.environ.get("MENDER_SERVER")
self.user = os.environ.get("MENDER_USER")
self.password = os.environ.get("MENDER_PASSWORD")
self.token = os.environ.get("MENDER_TOKEN")
logging.info("Mender server: {}".format(self.server))
def get_token(self):
return self.token
def renew_token(self):
string = self.user + ":" + self.password
base64string = base64.b64encode(string.encode("utf-8"))
headers = {"Accept": "application/json", "Authorization": "Basic {}".format(str(base64string, "utf-8")), "Content-Type": "application/json"}
r = requests.post(f"{self.server}/api/management/v1/useradm/auth/login", headers=headers)
self.token = r.text
@staticmethod
def generate_artifact(type_artifact, artifact_name, device_type, file_path):
os.system(f"mender-artifact write module-image -T {type_artifact} -n {artifact_name} -t {device_type} -o {artifact_name}.mender -f {file_path}")
def get_artifacts(self):
headers = {"Accept": "application/json", "Authorization": "Bearer {}".format(self.get_token())}
r = requests.get(f"{self.server}/api/management/v1/deployments/artifacts", headers=headers)
logging.info(json.dumps(r.json(), indent=2))
def upload_artifact(self, artifact_path, description):
headers = {
"Accept": "application/json",
"Authorization": "Bearer {}".format(self.get_token()),
}
multipart_form_data = {
"description": (None, f"{description}"),
"artifact": (None, open(f"{artifact_path}", "rb")),
}
r = requests.post(f"{self.server}/api/management/v1/deployments/artifacts", files=multipart_form_data, headers=headers)
logging.info(r.text)
def deploy_artifact_device(self, artifact_name, device):
headers = {"Accept": "application/json", "Authorization": "Bearer {}".format(self.get_token()), "Content-Type": "application/json"}
json_req = {"artifact_name": f"{artifact_name}", "devices": [f"{device}"], "name": f"{device}", "all_devices": False}
r = requests.post(f"{self.server}/api/management/v1/deployments/deployments", json=json_req, headers=headers)
logging.info(r.text)
def deploy_artifact_list(self, artifact_name, devices: List[str]):
headers = {"Accept": "application/json", "Authorization": "Basic {}".format((self.user + ":" + self.password).encode("base64")), "Content-Type": "application/form-data"}
json_req = {
"artifact_name": f"{artifact_name}",
"devices": devices,
"name": "Example_deployment",
}
r = requests.post(f"{self.server}/api/management/v1/deployments/deployments", json=json_req, headers=headers)
logging.info(r.text)
def get_info_deployment(self, deployment_id):
headers = {"Accept": "application/json", "Authorization": "Bearer {}".format(self.get_token())}
r = requests.get(f"{self.server}/api/management/v1/deployments/deployments/{deployment_id}", headers=headers)
logging.info(json.dumps(r.json(), indent=2))
def get_my_info(self):
headers = {"Accept": "application/json", "Authorization": "Bearer {}".format(self.get_token())}
r = requests.get(f"{self.server}/api/management/v1/useradm/users/me", headers=headers)
logging.info(json.dumps(r.json(), indent=2))
def get_devices(self):
headers = {"Accept": "application/json", "Authorization": "Bearer {}".format(self.get_token())}
r = requests.get(f"{self.server}/api/management/v2/devauth/devices", params={"per_page": "300"}, headers=headers)
logging.info(json.dumps(r.json(), indent=2))
def get_devices_by_group(self, group):
headers = {"Accept": "application/json", "Authorization": "Bearer {}".format(self.get_token())}
json_req = {
"page": 1,
"per_page": 300,
"filters": [{"scope": "system", "attribute": "group", "type": "$eq", "value": group}, {"scope": "identity", "attribute": "status", "type": "$eq", "value": "accepted"}],
"sort": [],
"attributes": [
{"scope": "identity", "attribute": "status"},
{"scope": "inventory", "attribute": "artifact_name"},
{"scope": "inventory", "attribute": "device_type"},
{"scope": "inventory", "attribute": "rootfs-image.version"},
{"scope": "monitor", "attribute": "alerts"},
{"scope": "system", "attribute": "created_ts"},
{"scope": "system", "attribute": "updated_ts"},
{"scope": "tags", "attribute": "name"},
{"scope": "identity", "attribute": "name"},
],
}
r = requests.post(f"{self.server}/api/management/v2/inventory/filters/search", json=json_req, headers=headers)
logging.info(json.dumps(r.json(), indent=2))
# json to file
with open("devices_{}.json".format(group), "w") as outfile:
json.dump(r.json(), outfile, indent=2)
def get_info_device(self, device_id):
headers = {"Accept": "application/json", "Authorization": "Bearer {}".format(self.get_token())}
r = requests.get(f"{self.server}/api/management/v1/inventory/devices/{device_id}", headers=headers)
logging.info(json.dumps(r.json(), indent=2))
def get_connected_device(self, device_id):
headers = {"Accept": "application/json", "Authorization": "Bearer {}".format(self.get_token())}
r = requests.get(f"{self.server}/api/management/v1/deviceconnect/devices/{device_id}", headers=headers)
logging.info(json.dumps(r.json(), indent=2))
# ---- File: enriquetomasmb_nebula/nebula/frontend/app.py (repo: enriquetomasmb/nebula, GPL-3.0) ----
import argparse
import asyncio
import datetime
import io
import json
import logging
import multiprocessing
import os
import time
import requests
import signal
import sys
import zipfile
from urllib.parse import urlencode
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", ".."))
logging.basicConfig(level=logging.INFO)
from ansi2html import Ansi2HTMLConverter
from nebula.frontend.utils import Utils
from nebula.frontend.database import (
initialize_databases,
list_users,
verify,
verify_hash_algorithm,
delete_user_from_db,
add_user,
update_user,
scenario_update_record,
scenario_set_all_status_to_finished,
get_running_scenario,
get_user_info,
get_scenario_by_name,
list_nodes_by_scenario_name,
remove_nodes_by_scenario_name,
get_run_hashes_scenario,
remove_scenario_by_name,
scenario_set_status_to_finished,
get_all_scenarios_and_check_completed,
check_scenario_with_role,
update_node_record,
save_notes,
get_notes,
remove_note,
)
from fastapi import FastAPI, Request, Depends, HTTPException, status, Form, Response, WebSocket, WebSocketDisconnect, BackgroundTasks
from fastapi.responses import HTMLResponse, RedirectResponse, JSONResponse, FileResponse, PlainTextResponse, StreamingResponse
from fastapi.templating import Jinja2Templates
from fastapi.staticfiles import StaticFiles
from fastapi.middleware.cors import CORSMiddleware
from starlette.middleware.sessions import SessionMiddleware
from starlette.exceptions import HTTPException as StarletteHTTPException
from typing import Any, Dict
class Settings:
port: int = os.environ.get("NEBULA_FRONTEND_PORT", 6060)
production: bool = os.environ.get("NEBULA_PRODUCTION", "False") == "True"
gpu_available: bool = os.environ.get("NEBULA_GPU_AVAILABLE", "False") == "True"
advanced_analytics: bool = os.environ.get("NEBULA_ADVANCED_ANALYTICS", "False") == "True"
host_platform: str = os.environ.get("NEBULA_HOST_PLATFORM", "unix")
log_dir: str = os.environ.get("NEBULA_LOGS_DIR")
config_dir: str = os.environ.get("NEBULA_CONFIG_DIR")
cert_dir: str = os.environ.get("NEBULA_CERTS_DIR")
root_host_path: str = os.environ.get("NEBULA_ROOT_HOST")
config_frontend_dir: str = os.environ.get("FEDELLAR_CONFIG_FRONTEND_DIR", "config")
statistics_port: int = os.environ.get("NEBULA_STATISTICS_PORT", 8080)
secret_key: str = os.environ.get("SECRET_KEY", os.urandom(24).hex())
PERMANENT_SESSION_LIFETIME: datetime.timedelta = datetime.timedelta(minutes=60)
templates_dir: str = "templates"
settings = Settings()
logging.info(f"NEBULA_PRODUCTION: {settings.production}")
logging.info(f"NEBULA_ADVANCED_ANALYTICS: {settings.advanced_analytics}")
app = FastAPI()
app.add_middleware(SessionMiddleware, secret_key=settings.secret_key)
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.mount("/nebula/static", StaticFiles(directory="static"), name="static")
class ConnectionManager:
def __init__(self):
self.active_connections: list[WebSocket] = []
async def connect(self, websocket: WebSocket):
await websocket.accept()
self.active_connections.append(websocket)
message = {"type": "control", "message": f"Client #{len(self.active_connections)} connected"}
await self.broadcast(json.dumps(message))
def disconnect(self, websocket: WebSocket):
self.active_connections.remove(websocket)
async def send_personal_message(self, message: str, websocket: WebSocket):
await websocket.send_text(message)
async def broadcast(self, message: str):
for connection in self.active_connections:
await connection.send_text(message)
manager = ConnectionManager()
@app.websocket("/nebula/ws/{client_id}")
async def websocket_endpoint(websocket: WebSocket, client_id: int):
await manager.connect(websocket)
try:
while True:
data = await websocket.receive_text()
message = {"type": "control", "message": f"Client #{client_id} says: {data}"}
await manager.broadcast(json.dumps(message))
# await manager.send_personal_message(f"You wrote: {data}", websocket)
except WebSocketDisconnect:
manager.disconnect(websocket)
message = {"type": "control", "message": f"Client #{client_id} left the chat"}
await manager.broadcast(json.dumps(message))
templates = Jinja2Templates(directory=settings.templates_dir)
def datetimeformat(value, format="%B %d, %Y %H:%M"):
return datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S").strftime(format)
def add_global_context(request: Request):
return {
"is_production": settings.production,
}
templates.env.filters["datetimeformat"] = datetimeformat
templates.env.globals.update(add_global_context=add_global_context)
def get_session(request: Request) -> Dict:
return request.session
def set_default_user():
username = os.environ.get("NEBULA_DEFAULT_USER", "admin")
password = os.environ.get("NEBULA_DEFAULT_PASSWORD", "admin")
if not list_users():
add_user(username, password, "admin")
if not verify_hash_algorithm(username):
update_user(username, password, "admin")
@app.on_event("startup")
async def startup_event():
await initialize_databases()
set_default_user()
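# Per-scenario registry of the nodes that have registered so far, together with the expected node count and an asyncio condition used to wait for all of them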
nodes_registration = {}
scenarios_list_length = 0
scenarios_finished = 0
# Detect CTRL+C from parent process
def signal_handler(signal, frame):
logging.info("You pressed Ctrl+C [frontend]!")
scenario_set_all_status_to_finished()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
@app.exception_handler(StarletteHTTPException)
async def custom_http_exception_handler(request: Request, exc: StarletteHTTPException):
context = {"request": request, "session": request.session}
if exc.status_code == status.HTTP_401_UNAUTHORIZED:
return templates.TemplateResponse("401.html", context, status_code=exc.status_code)
elif exc.status_code == status.HTTP_403_FORBIDDEN:
return templates.TemplateResponse("403.html", context, status_code=exc.status_code)
elif exc.status_code == status.HTTP_404_NOT_FOUND:
return templates.TemplateResponse("404.html", context, status_code=exc.status_code)
elif exc.status_code == status.HTTP_405_METHOD_NOT_ALLOWED:
return templates.TemplateResponse("405.html", context, status_code=exc.status_code)
elif exc.status_code == status.HTTP_413_REQUEST_ENTITY_TOO_LARGE:
return templates.TemplateResponse("413.html", context, status_code=exc.status_code)
return await request.app.default_exception_handler(request, exc)
@app.get("/", response_class=HTMLResponse)
async def index():
return RedirectResponse(url="/nebula")
@app.get("/nebula", response_class=HTMLResponse)
@app.get("/nebula/", response_class=HTMLResponse)
async def nebula_home(request: Request):
alerts = []
return templates.TemplateResponse("index.html", {"request": request, "alerts": alerts})
@app.get("/nebula/dashboard/{scenario_name}/private", response_class=HTMLResponse)
async def nebula_dashboard_private(request: Request, scenario_name: str, session: Dict = Depends(get_session)):
if "user" in session:
return templates.TemplateResponse("private.html", {"request": request, "scenario_name": scenario_name})
else:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
@app.get("/nebula/admin", response_class=HTMLResponse)
async def nebula_admin(request: Request, session: Dict = Depends(get_session)):
if session.get("role") == "admin":
user_list = list_users(all_info=True)
user_table = zip(range(1, len(user_list) + 1), [user[0] for user in user_list], [user[2] for user in user_list])
return templates.TemplateResponse("admin.html", {"request": request, "users": user_table})
else:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
@app.post("/nebula/dashboard/{scenario_name}/save_note")
async def save_note_for_scenario(scenario_name: str, request: Request, session: Dict = Depends(get_session)):
if "user" in session:
data = await request.json()
notes = data["notes"]
try:
save_notes(scenario_name, notes)
return JSONResponse({"status": "success"})
except Exception as e:
logging.error(e)
return JSONResponse({"status": "error", "message": "Could not save the notes"}, status_code=500)
else:
return JSONResponse({"status": "error", "message": "User not logged in"}, status_code=401)
@app.get("/nebula/dashboard/{scenario_name}/notes")
async def get_notes_for_scenario(scenario_name: str):
notes_record = get_notes(scenario_name)
if notes_record:
notes_data = dict(zip(notes_record.keys(), notes_record))
return JSONResponse({"status": "success", "notes": notes_data["scenario_notes"]})
else:
return JSONResponse({"status": "error", "message": "Notes not found for the specified scenario"})
@app.post("/nebula/login")
async def nebula_login(request: Request, session: Dict = Depends(get_session), user: str = Form(...), password: str = Form(...)):
user_submitted = user.upper()
if (user_submitted in list_users()) and verify(user_submitted, password):
user_info = get_user_info(user_submitted)
session["user"] = user_submitted
session["role"] = user_info[2]
return JSONResponse({"message": "Login successful"}, status_code=200)
else:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
@app.get("/nebula/logout")
async def nebula_logout(request: Request, session: Dict = Depends(get_session)):
session.pop("user", None)
return RedirectResponse(url="/nebula")
@app.get("/nebula/user/delete/{user}/")
async def nebula_delete_user(user: str, request: Request, session: Dict = Depends(get_session)):
if session.get("role") == "admin":
if user == "ADMIN": # ADMIN account can't be deleted.
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN)
if user == session["user"]: # Current user can't delete himself.
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN)
delete_user_from_db(user)
return RedirectResponse(url="/nebula/admin")
else:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
@app.post("/nebula/user/add")
async def nebula_add_user(request: Request, session: Dict = Depends(get_session), user: str = Form(...), password: str = Form(...), role: str = Form(...)):
if session.get("role") == "admin": # only Admin should be able to add user.
user_list = list_users(all_info=True)
if user.upper() in user_list:
return RedirectResponse(url="/nebula/admin")
elif " " in user or "'" in user or '"' in user:
return RedirectResponse(url="/nebula/admin")
else:
add_user(user, password, role)
return RedirectResponse(url="/nebula/admin")
else:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
@app.post("/nebula/user/update")
async def nebula_update_user(request: Request, session: Dict = Depends(get_session), user: str = Form(...), password: str = Form(...), role: str = Form(...)):
if session.get("role") == "admin":
user_list = list_users()
if user not in user_list:
return RedirectResponse(url="/nebula/admin")
else:
update_user(user, password, role)
return RedirectResponse(url="/nebula/admin")
else:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
@app.get("/nebula/api/dashboard/runningscenario", response_class=JSONResponse)
async def nebula_dashboard_runningscenario():
scenario_running = get_running_scenario()
if scenario_running:
scenario_running_as_dict = dict(scenario_running)
scenario_running_as_dict["scenario_status"] = "running"
return JSONResponse(scenario_running_as_dict)
else:
return JSONResponse({"scenario_status": "not running"})
@app.get("/nebula/api/dashboard", response_class=JSONResponse)
@app.get("/nebula/dashboard", response_class=HTMLResponse)
async def nebula_dashboard(request: Request, session: Dict = Depends(get_session)):
if "user" in session.keys():
scenarios = get_all_scenarios_and_check_completed() # Get all scenarios after checking if they are completed
scenario_running = get_running_scenario()
else:
scenarios = None
scenario_running = None
bool_completed = False
if scenario_running:
bool_completed = scenario_running[5] == "completed"
if scenarios:
if request.url.path == "/nebula/dashboard":
return templates.TemplateResponse(
"dashboard.html",
{
"request": request,
"scenarios": scenarios,
"scenarios_list_length": scenarios_list_length,
"scenarios_finished": scenarios_finished,
"scenario_running": scenario_running,
"scenario_completed": bool_completed,
"user_logged_in": session.get("user"),
},
)
elif request.url.path == "/nebula/api/dashboard":
scenarios_as_dict = [dict(row) for row in scenarios]
return JSONResponse(scenarios_as_dict)
else:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
else:
if request.url.path == "/nebula/dashboard":
return templates.TemplateResponse(
"dashboard.html",
{
"request": request,
"user_logged_in": session.get("user"),
},
)
elif request.url.path == "/nebula/api/dashboard":
return JSONResponse({"scenarios_status": "not found in database"})
else:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
@app.get("/nebula/api/dashboard/{scenario_name}/monitor", response_class=JSONResponse)
@app.get("/nebula/dashboard/{scenario_name}/monitor", response_class=HTMLResponse)
async def nebula_dashboard_monitor(scenario_name: str, request: Request, session: Dict = Depends(get_session)):
scenario = get_scenario_by_name(scenario_name)
if scenario:
nodes_list = list_nodes_by_scenario_name(scenario_name)
if nodes_list:
nodes_config = []
nodes_status = []
for node in nodes_list:
nodes_config.append((node[2], node[3], node[4])) # IP, Port, Role
if datetime.datetime.now() - datetime.datetime.strptime(node[8], "%Y-%m-%d %H:%M:%S.%f") > datetime.timedelta(seconds=25):
nodes_status.append(False)
else:
nodes_status.append(True)
nodes_table = zip(
[x[0] for x in nodes_list], # UID
[x[1] for x in nodes_list], # IDX
[x[2] for x in nodes_list], # IP
[x[3] for x in nodes_list], # Port
[x[4] for x in nodes_list], # Role
[x[5] for x in nodes_list], # Neighbors
[x[6] for x in nodes_list], # Latitude
[x[7] for x in nodes_list], # Longitude
[x[8] for x in nodes_list], # Timestamp
[x[9] for x in nodes_list], # Federation
[x[10] for x in nodes_list], # Round
[x[11] for x in nodes_list], # Scenario name
[x[12] for x in nodes_list], # Run hash
nodes_status, # Status
)
topology_path = Utils.check_path(settings.config_dir, os.path.join(scenario_name, "topology.png"))
if os.path.exists(topology_path):
latest_participant_file_mtime = max([os.path.getmtime(os.path.join(settings.config_dir, scenario_name, f"participant_{node[1]}.json")) for node in nodes_list])
if os.path.getmtime(topology_path) < latest_participant_file_mtime:
update_topology(scenario[0], nodes_list, nodes_config)
else:
update_topology(scenario[0], nodes_list, nodes_config)
if request.url.path == f"/nebula/dashboard/{scenario_name}/monitor":
return templates.TemplateResponse(
"monitor.html",
{
"request": request,
"scenario_name": scenario_name,
"scenario": scenario,
"nodes": nodes_table,
"user_logged_in": session.get("user"),
},
)
elif request.url.path == f"/nebula/api/dashboard/{scenario_name}/monitor":
return JSONResponse(
{
"scenario_status": scenario[5],
"nodes_table": list(nodes_table),
"scenario_name": scenario[0],
"scenario_title": scenario[3],
"scenario_description": scenario[4],
}
)
else:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
else:
if request.url.path == f"/nebula/dashboard/{scenario_name}/monitor":
return templates.TemplateResponse(
"monitor.html",
{
"request": request,
"scenario_name": scenario_name,
"scenario": scenario,
"nodes": [],
"user_logged_in": session.get("user"),
},
)
elif request.url.path == f"/nebula/api/dashboard/{scenario_name}/monitor":
return JSONResponse(
{
"scenario_status": scenario[5],
"nodes_table": [],
"scenario_name": scenario[0],
"scenario_title": scenario[3],
"scenario_description": scenario[4],
}
)
else:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
else:
if request.url.path == f"/nebula/dashboard/{scenario_name}/monitor":
return templates.TemplateResponse(
"monitor.html",
{
"request": request,
"scenario_name": scenario_name,
"scenario": None,
"nodes": [],
"user_logged_in": session.get("user"),
},
)
elif request.url.path == f"/nebula/api/dashboard/{scenario_name}/monitor":
return JSONResponse({"scenario_status": "not exists"})
else:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
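# Rebuild the adjacency matrix from each node's neighbor list and redraw the topology image of the scenario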
def update_topology(scenario_name, nodes_list, nodes_config):
import numpy as np
nodes = []
for node in nodes_list:
nodes.append(node[2] + ":" + str(node[3]))
matrix = np.zeros((len(nodes), len(nodes)))
for node in nodes_list:
for neighbour in node[5].split(" "):
if neighbour != "":
if neighbour in nodes:
matrix[
nodes.index(node[2] + ":" + str(node[3])),
nodes.index(neighbour),
] = 1
from nebula.addons.topologymanager import TopologyManager
tm = TopologyManager(n_nodes=len(nodes_list), topology=matrix, scenario_name=scenario_name)
tm.update_nodes(nodes_config)
tm.draw_graph(path=os.path.join(settings.config_dir, scenario_name, f"topology.png"))
@app.post("/nebula/dashboard/{scenario_name}/node/update")
async def nebula_update_node(scenario_name: str, request: Request, session: Dict = Depends(get_session)):
if request.method == "POST":
if request.headers.get("content-type") == "application/json":
config = await request.json()
timestamp = datetime.datetime.now()
# Update the node in database
await update_node_record(
str(config["device_args"]["uid"]),
str(config["device_args"]["idx"]),
str(config["network_args"]["ip"]),
str(config["network_args"]["port"]),
str(config["device_args"]["role"]),
str(config["network_args"]["neighbors"]),
str(config["mobility_args"]["latitude"]),
str(config["mobility_args"]["longitude"]),
str(timestamp),
str(config["scenario_args"]["federation"]),
str(config["federation_args"]["round"]),
str(config["scenario_args"]["name"]),
str(config["tracking_args"]["run_hash"]),
)
neighbors_distance = config["mobility_args"]["neighbors_distance"]
node_update = {
"type": "node_update",
"scenario_name": scenario_name,
"uid": config["device_args"]["uid"],
"idx": config["device_args"]["idx"],
"ip": config["network_args"]["ip"],
"port": str(config["network_args"]["port"]),
"role": config["device_args"]["role"],
"neighbors": config["network_args"]["neighbors"],
"latitude": config["mobility_args"]["latitude"],
"longitude": config["mobility_args"]["longitude"],
"timestamp": str(timestamp),
"federation": config["scenario_args"]["federation"],
"round": config["federation_args"]["round"],
"name": config["scenario_args"]["name"],
"status": True,
"neighbors_distance": neighbors_distance,
}
try:
await manager.broadcast(json.dumps(node_update))
except Exception as e:
logging.error(f"Error sending node_update to socketio: {e}")
pass
return JSONResponse({"message": "Node updated", "status": "success"}, status_code=200)
else:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST)
@app.post("/nebula/dashboard/{scenario_name}/node/register")
async def nebula_register_node(scenario_name: str, request: Request):
if request.headers.get("content-type") == "application/json":
data = await request.json()
node = data["node"]
logging.info(f"Registering node {node} for scenario {scenario_name}")
async with nodes_registration[scenario_name]["condition"]:
nodes_registration[scenario_name]["nodes"].add(node)
logging.info(f"Node {node} registered")
if len(nodes_registration[scenario_name]["nodes"]) == nodes_registration[scenario_name]["n_nodes"]:
nodes_registration[scenario_name]["condition"].notify_all()
logging.info("All nodes registered")
return JSONResponse({"message": "Node registered", "status": "success"}, status_code=200)
else:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST)
@app.get("/nebula/dashboard/scenarios/node/list")
async def nebula_list_all_scenarios(session: Dict = Depends(get_session)):
if "user" not in session.keys() or session["role"] not in ["admin", "user"]:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Unauthorized")
scenarios = {}
for scenario_name, scenario_info in nodes_registration.items():
scenarios[scenario_name] = list(scenario_info["nodes"])
if not scenarios:
return JSONResponse({"message": "No scenarios found", "status": "error"}, status_code=404)
return JSONResponse({"scenarios": scenarios, "status": "success"}, status_code=200)
@app.get("/nebula/dashboard/scenarios/node/erase")
async def nebula_erase_all_nodes(session: Dict = Depends(get_session)):
if "user" not in session.keys() or session["role"] not in ["admin", "user"]:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Unauthorized")
nodes_registration.clear()
return JSONResponse({"message": "All nodes erased", "status": "success"}, status_code=200)
@app.get("/nebula/dashboard/{scenario_name}/node/wait")
async def nebula_wait_nodes(scenario_name: str):
if scenario_name not in nodes_registration:
return JSONResponse({"message": "Scenario not found", "status": "error"}, status_code=404)
async with nodes_registration[scenario_name]["condition"]:
while len(nodes_registration[scenario_name]["nodes"]) < nodes_registration[scenario_name]["n_nodes"]:
await nodes_registration[scenario_name]["condition"].wait()
return JSONResponse({"message": "All nodes registered", "status": "success"}, status_code=200)
@app.get("/nebula/dashboard/{scenario_name}/node/{id}/infolog")
async def nebula_monitor_log(scenario_name: str, id: str):
logs = Utils.check_path(settings.log_dir, os.path.join(scenario_name, f"participant_{id}.log"))
if os.path.exists(logs):
return FileResponse(logs, media_type="text/plain", filename=f"participant_{id}.log")
else:
raise HTTPException(status_code=404, detail="Log file not found")
@app.get("/nebula/dashboard/{scenario_name}/node/{id}/infolog/{number}", response_class=PlainTextResponse)
async def nebula_monitor_log_x(scenario_name: str, id: str, number: int):
logs = Utils.check_path(settings.log_dir, os.path.join(scenario_name, f"participant_{id}.log"))
if os.path.exists(logs):
with open(logs, "r") as f:
lines = f.readlines()[-number:]
lines = "".join(lines)
converter = Ansi2HTMLConverter()
html_text = converter.convert(lines, full=False)
return Response(content=html_text, media_type="text/plain")
else:
return Response(content="No logs available", media_type="text/plain")
@app.get("/nebula/dashboard/{scenario_name}/node/{id}/debuglog")
async def nebula_monitor_log_debug(scenario_name: str, id: str):
logs = Utils.check_path(settings.log_dir, os.path.join(scenario_name, f"participant_{id}_debug.log"))
if os.path.exists(logs):
return FileResponse(logs, media_type="text/plain", filename=f"participant_{id}_debug.log")
else:
raise HTTPException(status_code=404, detail="Log file not found")
@app.get("/nebula/dashboard/{scenario_name}/node/{id}/errorlog")
async def nebula_monitor_log_error(scenario_name: str, id: str):
logs = Utils.check_path(settings.log_dir, os.path.join(scenario_name, f"participant_{id}_error.log"))
if os.path.exists(logs):
return FileResponse(logs, media_type="text/plain", filename=f"participant_{id}_error.log")
else:
raise HTTPException(status_code=404, detail="Log file not found")
@app.get("/nebula/dashboard/{scenario_name}/topology/image/")
async def nebula_monitor_image(scenario_name: str):
topology_image = Utils.check_path(settings.log_dir, os.path.join(scenario_name, "topology.png"))
if os.path.exists(topology_image):
return FileResponse(topology_image, media_type="image/png")
else:
raise HTTPException(status_code=404, detail="Topology image not found")
def stop_scenario(scenario_name):
from nebula.scenarios import ScenarioManagement
ScenarioManagement.stop_participants()
ScenarioManagement.stop_blockchain()
scenario_set_status_to_finished(scenario_name)
def stop_all_scenarios():
from nebula.scenarios import ScenarioManagement
ScenarioManagement.stop_participants()
ScenarioManagement.stop_blockchain()
scenario_set_all_status_to_finished()
@app.get("/nebula/dashboard/{scenario_name}/stop/{stop_all}")
async def nebula_stop_scenario(scenario_name: str, stop_all: bool, request: Request, session: Dict = Depends(get_session)):
if "user" in session.keys():
if session["role"] == "demo":
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
elif session["role"] == "user":
if not check_scenario_with_role(session["role"], scenario_name):
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
if stop_all:
stop_all_scenarios_event.set()
global scenarios_list_length
global scenarios_finished
scenarios_list_length = 0
scenarios_finished = 0
stop_scenario(scenario_name)
else:
finish_scenario_event.set()
stop_scenario(scenario_name)
return RedirectResponse(url="/nebula/dashboard")
else:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
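# Remove every trace of a scenario: statistics runs (when advanced analytics is enabled), registered nodes, database records, notes and generated files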
def remove_scenario(scenario_name=None):
from nebula.scenarios import ScenarioManagement
if settings.advanced_analytics:
from aim.sdk.repo import Repo
# NEBULALOGGER START
try:
repo = Repo.from_path(f"{settings.log_dir}")
list_tuples_participant_hash = get_run_hashes_scenario(scenario_name)
hashes = [tuple[1] for tuple in list_tuples_participant_hash]
logging.info(f"Removing statistics from {scenario_name}: {hashes}")
success, remaining_runs = repo.delete_runs(hashes)
if success:
logging.info(f"Successfully deleted {len(hashes)} runs.")
else:
logging.info("Something went wrong while deleting runs.")
logging.info(f"Remaining runs: {remaining_runs}")
except Exception as e:
logging.error(f"Error removing statistics from {scenario_name}: {e}")
pass
# NEBULALOGGER END
# Remove registered nodes and conditions
nodes_registration.pop(scenario_name, None)
remove_nodes_by_scenario_name(scenario_name)
remove_scenario_by_name(scenario_name)
remove_note(scenario_name)
ScenarioManagement.remove_files_by_scenario(scenario_name)
@app.get("/nebula/dashboard/{scenario_name}/remove")
async def nebula_remove_scenario(scenario_name: str, request: Request, session: Dict = Depends(get_session)):
if "user" in session.keys():
if session["role"] == "demo":
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
elif session["role"] == "user":
if not check_scenario_with_role(session["role"], scenario_name):
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
remove_scenario(scenario_name)
return RedirectResponse(url="/nebula/dashboard")
else:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
if settings.advanced_analytics:
logging.info("Advanced analytics enabled")
# NEBULALOGGER START
def get_tracking_hash_scenario(scenario_name):
import requests
url = f"http://127.0.0.1:{settings.statistics_port}/nebula/statistics/api/experiments"
# Get JSON data from the URL
response = requests.get(url)
if response.status_code == 200:
experiments = response.json()
for experiment in experiments:
if experiment["name"] == scenario_name:
return experiment["id"]
return None
@app.get("/nebula/dashboard/statistics/", response_class=HTMLResponse)
@app.get("/nebula/dashboard/{scenario_name}/statistics/", response_class=HTMLResponse)
async def nebula_dashboard_statistics(request: Request, scenario_name: str = None):
statistics_url = "/nebula/statistics/"
if scenario_name is not None:
experiment_hash = get_tracking_hash_scenario(scenario_name=scenario_name)
statistics_url += f"experiments/{experiment_hash}/runs"
return templates.TemplateResponse("statistics.html", {"request": request, "statistics_url": statistics_url})
@app.get("/nebula/dashboard/{scenario_name}/node/{hash}/metrics", response_class=HTMLResponse)
async def nebula_dashboard_node_metrics(request: Request, scenario_name: str, hash: str):
statistics_url = f"/nebula/statistics/runs/{hash}/metrics"
return templates.TemplateResponse("statistics.html", {"request": request, "statistics_url": statistics_url})
@app.api_route("/nebula/statistics/", methods=["GET", "POST"])
@app.api_route("/nebula/statistics/{path:path}", methods=["GET", "POST"])
async def statistics_proxy(request: Request, path: str = None, session: Dict = Depends(get_session)):
if "user" in session.keys():
query_string = urlencode(request.query_params)
url = f"http://127.0.0.1:{settings.statistics_port}/nebula/statistics"
url = f"{url}{('/' + path) if path else '/'}" + (f"?{query_string}" if query_string else "")
headers = {key: value for key, value in request.headers.items() if key.lower() != "host"}
response = requests.request(
method=request.method,
url=url,
headers=headers,
data=await request.body(),
cookies=request.cookies,
allow_redirects=False,
)
excluded_headers = [
"content-encoding",
"content-length",
"transfer-encoding",
"connection",
]
filtered_headers = [(name, value) for name, value in response.raw.headers.items() if name.lower() not in excluded_headers]
return Response(content=response.content, status_code=response.status_code, headers=dict(filtered_headers))
else:
raise HTTPException(status_code=401)
@app.get("/nebula/dashboard/{scenario_name}/download/metrics")
async def nebula_dashboard_download_metrics(scenario_name: str, request: Request, session: Dict = Depends(get_session)):
from aim.sdk.repo import Repo
if "user" in session.keys():
            # Get the metrics of the scenario
os.makedirs(Utils.check_path(settings.log_dir, os.path.join(scenario_name, "metrics")), exist_ok=True)
aim_repo = Repo.from_path("/nebula/nebula/app/logs")
query = "run.experiment == '{}'".format(scenario_name)
df = aim_repo.query_metrics(query).dataframe()
hash_to_participant = {hash: participant for participant, hash in get_run_hashes_scenario(scenario_name)}
df["participant"] = df["run.hash"].map(hash_to_participant)
df.drop(columns=["run", "run.hash", "metric.context", "epoch"], axis=1, inplace=True)
cols = df.columns.tolist()
cols.remove("participant")
cols.remove("metric.name")
df = df.reindex(columns=["participant", "metric.name"] + cols)
for name, group in df.groupby("participant"):
group.to_csv(
os.path.join(settings.log_dir, scenario_name, "metrics", f"{name}.csv"),
index=True,
)
            # Create a zip file with the metrics, send it to the user, and delete it afterwards
memory_file = io.BytesIO()
with zipfile.ZipFile(memory_file, "w", zipfile.ZIP_DEFLATED) as zipf:
zipdir(os.path.join(settings.log_dir, scenario_name, "metrics"), zipf)
memory_file.seek(0)
return StreamingResponse(memory_file, media_type="application/zip", headers={"Content-Disposition": f"attachment; filename={scenario_name}_metrics.zip"})
else:
raise HTTPException(status_code=401)
# NEBULALOGGER END
else:
logging.info("Advanced analytics disabled")
# TENSORBOARD START
@app.get("/nebula/dashboard/statistics/", response_class=HTMLResponse)
@app.get("/nebula/dashboard/{scenario_name}/statistics/", response_class=HTMLResponse)
async def nebula_dashboard_statistics(request: Request, scenario_name: str = None):
statistics_url = "/nebula/statistics/"
if scenario_name is not None:
statistics_url += f"?smoothing=0&runFilter={scenario_name}"
return templates.TemplateResponse("statistics.html", {"request": request, "statistics_url": statistics_url})
@app.api_route("/nebula/statistics/", methods=["GET", "POST"])
@app.api_route("/nebula/statistics/{path:path}", methods=["GET", "POST"])
async def statistics_proxy(request: Request, path: str = None, session: Dict = Depends(get_session)):
if "user" in session.keys():
query_string = urlencode(request.query_params)
url = f"http://localhost:8080"
tensorboard_url = f"{url}{('/' + path) if path else ''}" + ("?" + query_string if query_string else "")
headers = {key: value for key, value in request.headers.items() if key.lower() != "host"}
response = requests.request(
method=request.method,
url=tensorboard_url,
headers=headers,
data=await request.body(),
cookies=request.cookies,
allow_redirects=False,
)
excluded_headers = [
"content-encoding",
"content-length",
"transfer-encoding",
"connection",
]
filtered_headers = [(name, value) for name, value in response.raw.headers.items() if name.lower() not in excluded_headers]
if "text/html" in response.headers["Content-Type"]:
content = response.text
content = content.replace("url(/", f"url(/nebula/statistics/")
content = content.replace('src="/', f'src="/nebula/statistics/')
content = content.replace('href="/', f'href="/nebula/statistics/')
response = Response(content, response.status_code, dict(filtered_headers))
return response
if path and path.endswith(".js"):
content = response.text
content = content.replace("experiment/${s}/data/plugin", "nebula/statistics/experiment/${s}/data/plugin")
response = Response(content, response.status_code, dict(filtered_headers))
return response
return Response(response.content, response.status_code, dict(filtered_headers))
else:
raise HTTPException(status_code=401)
@app.get("/experiment/{path:path}")
@app.post("/experiment/{path:path}")
async def metrics_proxy(path: str = None, request: Request = None):
query_params = request.query_params
new_url = "/nebula/statistics/experiment/" + path
if query_params:
new_url += "?" + urlencode(query_params)
return RedirectResponse(url=new_url)
# TENSORBOARD END
def zipdir(path, ziph):
# ziph is zipfile handle
for root, dirs, files in os.walk(path):
for file in files:
ziph.write(
os.path.join(root, file),
os.path.relpath(os.path.join(root, file), os.path.join(path, "..")),
)
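# Note (illustrative, not part of the original module): archive entries are written relative to the
# parent of `path`, so zipping ".../logs/scenario_1" keeps the "scenario_1/" prefix on every entry.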
@app.get("/nebula/dashboard/{scenario_name}/download/logs")
async def nebula_dashboard_download_logs_metrics(scenario_name: str, request: Request, session: Dict = Depends(get_session)):
if "user" in session.keys():
log_folder = Utils.check_path(settings.log_dir, scenario_name)
config_folder = Utils.check_path(settings.config_dir, scenario_name)
if os.path.exists(log_folder) and os.path.exists(config_folder):
            # Create a zip file with the logs and the configuration files and send it to the user
memory_file = io.BytesIO()
with zipfile.ZipFile(memory_file, "w", zipfile.ZIP_DEFLATED) as zipf:
zipdir(log_folder, zipf)
zipdir(config_folder, zipf)
memory_file.seek(0)
return StreamingResponse(memory_file, media_type="application/zip", headers={"Content-Disposition": f"attachment; filename={scenario_name}.zip"})
else:
raise HTTPException(status_code=404, detail="Log or config folder not found")
else:
raise HTTPException(status_code=401)
@app.get("/nebula/dashboard/deployment/", response_class=HTMLResponse)
async def nebula_dashboard_deployment(request: Request, session: Dict = Depends(get_session)):
scenario_running = get_running_scenario()
return templates.TemplateResponse("deployment.html", {"request": request, "scenario_running": scenario_running, "user_logged_in": session.get("user"), "gpu_available": settings.gpu_available})
def attack_node_assign(
nodes,
federation,
attack,
poisoned_node_percent,
poisoned_sample_percent,
poisoned_noise_percent,
):
"""Identify which nodes will be attacked"""
import random
import math
attack_matrix = []
n_nodes = len(nodes)
if n_nodes == 0:
return attack_matrix
nodes_index = []
# Get the nodes index
if federation == "DFL":
nodes_index = list(nodes.keys())
else:
for node in nodes:
if nodes[node]["role"] != "server":
nodes_index.append(node)
n_nodes = len(nodes_index)
# Number of attacked nodes, round up
num_attacked = int(math.ceil(poisoned_node_percent / 100 * n_nodes))
if num_attacked > n_nodes:
num_attacked = n_nodes
# Get the index of attacked nodes
attacked_nodes = random.sample(nodes_index, num_attacked)
# Assign the role of each node
    for node in nodes:
        node_att = "No Attack"
        attack_sample_percent = 0
        poisoned_ratio = 0
        if (node in attacked_nodes) or (nodes[node]["malicious"]):
            node_att = attack
            attack_sample_percent = poisoned_sample_percent / 100
            poisoned_ratio = poisoned_noise_percent / 100
        nodes[node]["attacks"] = node_att
        nodes[node]["poisoned_sample_percent"] = attack_sample_percent
        nodes[node]["poisoned_ratio"] = poisoned_ratio
        attack_matrix.append([node, node_att, attack_sample_percent, poisoned_ratio])
return nodes, attack_matrix
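# Minimal usage sketch (an assumption, not part of the original module): the nodes mapping is
# expected to provide at least a "role" and a "malicious" flag per participant.
def _example_attack_node_assign():
    nodes = {i: {"role": "trainer", "malicious": False} for i in range(4)}
    nodes, attack_matrix = attack_node_assign(
        nodes,
        federation="DFL",
        attack="Noise Injection",
        poisoned_node_percent=50,
        poisoned_sample_percent=80,
        poisoned_noise_percent=70,
    )
    return nodes, attack_matrix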
import math
def mobility_assign(nodes, mobile_participants_percent):
"""Assign mobility to nodes"""
import random
# Number of mobile nodes, round down
num_mobile = math.floor(mobile_participants_percent / 100 * len(nodes))
if num_mobile > len(nodes):
num_mobile = len(nodes)
# Get the index of mobile nodes
mobile_nodes = random.sample(list(nodes.keys()), num_mobile)
# Assign the role of each node
for node in nodes:
node_mob = False
if node in mobile_nodes:
node_mob = True
nodes[node]["mobility"] = node_mob
return nodes
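# Minimal usage sketch (an assumption, not part of the original module): mark roughly half of the
# participants as mobile.
def _example_mobility_assign():
    nodes = {i: {"role": "trainer"} for i in range(4)}
    return mobility_assign(nodes, mobile_participants_percent=50)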
# Stop all scenarios in the scenarios_list
stop_all_scenarios_event = asyncio.Event()
# Finish actual scenario
finish_scenario_event = asyncio.Event()
# Nodes that completed the experiment
nodes_finished = []
# Receive a stopped node
@app.post("/nebula/dashboard/{scenario_name}/node/done")
async def node_stopped(scenario_name: str, request: Request):
if request.headers.get("content-type") == "application/json":
data = await request.json()
nodes_finished.append(data["idx"])
nodes_list = list_nodes_by_scenario_name(scenario_name)
finished = True
# Check if all the nodes of the scenario have finished the experiment
for node in nodes_list:
if str(node[1]) not in map(str, nodes_finished):
finished = False
if finished:
nodes_finished.clear()
finish_scenario_event.set()
return JSONResponse(status_code=200, content={"message": "All nodes finished, scenario marked as completed."})
else:
return JSONResponse(status_code=200, content={"message": "Node marked as finished, waiting for other nodes."})
else:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST)
async def run_scenario(scenario_data, role):
from nebula.scenarios import ScenarioManagement
import subprocess
# Manager for the actual scenario
scenarioManagement = ScenarioManagement(scenario_data)
scenario_update_record(
scenario_name=scenarioManagement.scenario_name,
start_time=scenarioManagement.start_date_scenario,
end_time="",
status="running",
title=scenario_data["scenario_title"],
description=scenario_data["scenario_description"],
network_subnet=scenario_data["network_subnet"],
model=scenario_data["model"],
dataset=scenario_data["dataset"],
rounds=scenario_data["rounds"],
role=role,
)
# Run the actual scenario
try:
if scenarioManagement.scenario.mobility:
additional_participants = scenario_data["additional_participants"]
schema_additional_participants = scenario_data["schema_additional_participants"]
scenarioManagement.load_configurations_and_start_nodes(additional_participants, schema_additional_participants)
else:
scenarioManagement.load_configurations_and_start_nodes()
except subprocess.CalledProcessError as e:
logging.error(f"Error docker-compose up: {e}")
return
nodes_registration[scenarioManagement.scenario_name] = {
"n_nodes": scenario_data["n_nodes"],
"nodes": set(),
}
nodes_registration[scenarioManagement.scenario_name]["condition"] = asyncio.Condition()
return scenarioManagement.scenario_name
# Deploy the list of scenarios
async def run_scenarios(data, role):
global scenarios_finished
for scenario_data in data:
finish_scenario_event.clear()
logging.info(f"Running scenario {scenario_data['scenario_title']}")
scenario_name = await run_scenario(scenario_data, role)
        # Wait until the scenario is completed
while not finish_scenario_event.is_set() and not stop_all_scenarios_event.is_set():
await asyncio.sleep(1)
if stop_all_scenarios_event.is_set():
stop_all_scenarios_event.clear()
stop_scenario(scenario_name)
return
scenarios_finished = scenarios_finished + 1
stop_scenario(scenario_name)
await asyncio.sleep(5)
@app.post("/nebula/dashboard/deployment/run")
async def nebula_dashboard_deployment_run(request: Request, background_tasks: BackgroundTasks, session: Dict = Depends(get_session)):
if "user" not in session.keys() or session["role"] in ["demo", "user"] and get_running_scenario():
raise HTTPException(status_code=401)
if request.headers.get("content-type") != "application/json":
raise HTTPException(status_code=401)
stop_all_scenarios()
finish_scenario_event.clear()
stop_all_scenarios_event.clear()
data = await request.json()
global scenarios_finished, scenarios_list_length
scenarios_finished = 0
scenarios_list_length = len(data)
logging.info(f"Running deployment with {len(data)} scenarios")
background_tasks.add_task(run_scenarios, data, session["role"])
return RedirectResponse(url="/nebula/dashboard", status_code=303)
# return Response(content="Success", status_code=200)
if __name__ == "__main__":
# Parse args from command line
parser = argparse.ArgumentParser()
parser.add_argument("--port", type=int, default=5000, help="Port to run the frontend on.")
args = parser.parse_args()
logging.info(f"Starting frontend on port {args.port}")
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=args.port)
| 48,135 | Python | .py | 947 | 41.473073 | 196 | 0.646152 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,933 | utils.py | enriquetomasmb_nebula/nebula/frontend/utils.py | import os
class Utils:
def __init__(self):
self.init()
@classmethod
def check_path(cls, base_path, relative_path):
full_path = os.path.normpath(os.path.join(base_path, relative_path))
base_path = os.path.normpath(base_path)
if not full_path.startswith(base_path):
raise Exception("Not allowed")
return full_path
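# Illustrative behaviour sketch (not part of the original module), assuming a POSIX layout:
#   Utils.check_path("/nebula/app/logs", "scenario_1/metrics") -> "/nebula/app/logs/scenario_1/metrics"
#   Utils.check_path("/nebula/app/logs", "../secrets")         -> raises Exception("Not allowed")
# because the second path escapes the base directory after normalization.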
| 404 | Python | .py | 11 | 27.363636 | 76 | 0.641096 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,934 | database.py | enriquetomasmb_nebula/nebula/frontend/database.py | import datetime
import sqlite3
from argon2 import PasswordHasher
import asyncio
import aiosqlite
user_db_file_location = "databases/users.db"
node_db_file_location = "databases/nodes.db"
scenario_db_file_location = "databases/scenarios.db"
notes_db_file_location = "databases/notes.db"
_node_lock = asyncio.Lock()
PRAGMA_SETTINGS = [
"PRAGMA journal_mode=WAL;",
"PRAGMA synchronous=NORMAL;",
"PRAGMA journal_size_limit=1048576;",
"PRAGMA cache_size=10000;",
"PRAGMA temp_store=MEMORY;",
"PRAGMA cache_spill=0;"
]
async def setup_database(db_file_location):
async with aiosqlite.connect(db_file_location) as db:
for pragma in PRAGMA_SETTINGS:
await db.execute(pragma)
await db.commit()
async def initialize_databases():
await setup_database(user_db_file_location)
await setup_database(node_db_file_location)
await setup_database(scenario_db_file_location)
await setup_database(notes_db_file_location)
async with aiosqlite.connect(user_db_file_location) as conn:
_c = await conn.cursor()
await _c.execute(
"""
CREATE TABLE IF NOT EXISTS users (
user TEXT PRIMARY KEY,
password TEXT NOT NULL,
role TEXT NOT NULL
);
"""
)
await conn.commit()
async with aiosqlite.connect(node_db_file_location) as conn:
_c = await conn.cursor()
await _c.execute(
"""
CREATE TABLE IF NOT EXISTS nodes (
uid TEXT PRIMARY KEY,
idx TEXT NOT NULL,
ip TEXT NOT NULL,
port TEXT NOT NULL,
role TEXT NOT NULL,
neighbors TEXT NOT NULL,
latitude TEXT NOT NULL,
longitude TEXT NOT NULL,
timestamp TEXT NOT NULL,
federation TEXT NOT NULL,
round TEXT NOT NULL,
scenario TEXT NOT NULL,
hash TEXT NOT NULL
);
"""
)
await conn.commit()
async with aiosqlite.connect(scenario_db_file_location) as conn:
_c = await conn.cursor()
await _c.execute(
"""
CREATE TABLE IF NOT EXISTS scenarios (
name TEXT PRIMARY KEY,
start_time TEXT NOT NULL,
end_time TEXT NOT NULL,
title TEXT NOT NULL,
description TEXT NOT NULL,
status TEXT NOT NULL,
network_subnet TEXT NOT NULL,
model TEXT NOT NULL,
dataset TEXT NOT NULL,
rounds TEXT NOT NULL,
role TEXT NOT NULL
);
"""
)
await conn.commit()
async with aiosqlite.connect(notes_db_file_location) as conn:
_c = await conn.cursor()
await _c.execute(
"""
CREATE TABLE IF NOT EXISTS notes (
scenario TEXT PRIMARY KEY,
scenario_notes TEXT NOT NULL
);
"""
)
await conn.commit()
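# Minimal usage sketch (an assumption, not part of the original module): initialize_databases is a
# coroutine, so a one-off setup run could drive it with asyncio.run().
def _example_initialize_databases():
    asyncio.run(initialize_databases())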
def list_users(all_info=False):
with sqlite3.connect(user_db_file_location) as conn:
conn.row_factory = sqlite3.Row
c = conn.cursor()
c.execute("SELECT * FROM users")
result = c.fetchall()
if not all_info:
result = [user["user"] for user in result]
return result
def get_user_info(user):
with sqlite3.connect(user_db_file_location) as conn:
conn.row_factory = sqlite3.Row
c = conn.cursor()
command = "SELECT * FROM users WHERE user = ?"
c.execute(command, (user,))
result = c.fetchone()
return result
def verify(user, password):
ph = PasswordHasher()
with sqlite3.connect(user_db_file_location) as conn:
c = conn.cursor()
c.execute("SELECT password FROM users WHERE user = ?", (user,))
result = c.fetchone()
if result:
try:
return ph.verify(result[0], password)
except:
return False
return False
def verify_hash_algorithm(user):
user = user.upper()
argon2_prefixes = ('$argon2i$', '$argon2id$')
with sqlite3.connect(user_db_file_location) as conn:
c = conn.cursor()
c.execute("SELECT password FROM users WHERE user = ?", (user,))
result = c.fetchone()
if result:
password_hash = result[0]
return password_hash.startswith(argon2_prefixes)
return False
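# Illustrative note (an assumption, not part of the original module): this check can be used to
# detect accounts whose stored hash does not use Argon2 yet, e.g. to force a password re-hash
# (via update_user) the next time the user logs in.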
def delete_user_from_db(user):
with sqlite3.connect(user_db_file_location) as conn:
c = conn.cursor()
c.execute("DELETE FROM users WHERE user = ?", (user,))
def add_user(user, password, role):
ph = PasswordHasher()
with sqlite3.connect(user_db_file_location) as conn:
c = conn.cursor()
c.execute("INSERT INTO users VALUES (?, ?, ?)", (user.upper(), ph.hash(password), role))
def update_user(user, password, role):
ph = PasswordHasher()
with sqlite3.connect(user_db_file_location) as conn:
c = conn.cursor()
c.execute("UPDATE users SET password = ?, role = ? WHERE user = ?", (ph.hash(password), role, user.upper()))
def list_nodes(scenario_name=None, sort_by="idx"):
# list all nodes in the database
try:
with sqlite3.connect(node_db_file_location) as conn:
c = conn.cursor()
if scenario_name:
command = "SELECT * FROM nodes WHERE scenario = ? ORDER BY " + sort_by + ";"
c.execute(command, (scenario_name,))
else:
command = "SELECT * FROM nodes ORDER BY " + sort_by + ";"
c.execute(command)
result = c.fetchall()
return result
except sqlite3.Error as e:
print(f"Error occurred while listing nodes: {e}")
return None
def list_nodes_by_scenario_name(scenario_name):
try:
with sqlite3.connect(node_db_file_location) as conn:
c = conn.cursor()
command = "SELECT * FROM nodes WHERE scenario = ? ORDER BY CAST(idx AS INTEGER) ASC;"
c.execute(command, (scenario_name,))
result = c.fetchall()
return result
except sqlite3.Error as e:
print(f"Error occurred while listing nodes by scenario name: {e}")
return None
async def update_node_record(node_uid, idx, ip, port, role, neighbors, latitude, longitude, timestamp, federation, federation_round, scenario, run_hash):
# Check if the node record with node_uid and scenario already exists in the database
# If it does, update the record
# If it does not, create a new record
# _conn = sqlite3.connect(node_db_file_location)
global _node_lock
async with _node_lock:
async with aiosqlite.connect(node_db_file_location) as conn:
_c = await conn.cursor()
command = "SELECT * FROM nodes WHERE uid = ? AND scenario = ?;"
await _c.execute(command, (node_uid, scenario))
result = await _c.fetchone()
if result is None:
# Create a new record
await _c.execute("INSERT INTO nodes VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", (node_uid, idx, ip, port, role, neighbors, latitude, longitude, timestamp, federation, federation_round, scenario, run_hash))
else:
# Update the record
command = "UPDATE nodes SET idx = ?, ip = ?, port = ?, role = ?, neighbors = ?, latitude = ?, longitude = ?, timestamp = ?, federation = ?, round = ?, hash = ? WHERE uid = ? AND scenario = ?;"
await _c.execute(command, (idx, ip, port, role, neighbors, latitude, longitude, timestamp, federation, federation_round, run_hash, node_uid, scenario))
await conn.commit()
def remove_all_nodes():
with sqlite3.connect(node_db_file_location) as conn:
c = conn.cursor()
command = "DELETE FROM nodes;"
c.execute(command)
def remove_nodes_by_scenario_name(scenario_name):
with sqlite3.connect(node_db_file_location) as conn:
c = conn.cursor()
command = "DELETE FROM nodes WHERE scenario = ?;"
c.execute(command, (scenario_name,))
def get_run_hashes_scenario(scenario_name):
with sqlite3.connect(node_db_file_location) as conn:
c = conn.cursor()
command = "SELECT DISTINCT idx, hash FROM nodes WHERE scenario = ?;"
c.execute(command, (scenario_name,))
result = c.fetchall()
result_hashes = [(f"participant_{node[0]}", node[1]) for node in result]
return result_hashes
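# Return-shape sketch (illustrative values): a list of ("participant_<idx>", run_hash) tuples, e.g.
#   [("participant_0", "a1b2c3"), ("participant_1", "d4e5f6")]
# which callers can turn into a lookup table with
#   hash_to_participant = {run_hash: participant for participant, run_hash in get_run_hashes_scenario(name)}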
def get_all_scenarios(sort_by="start_time"):
with sqlite3.connect(scenario_db_file_location) as conn:
conn.row_factory = sqlite3.Row
c = conn.cursor()
if sort_by == "start_time":
command = """
SELECT * FROM scenarios
ORDER BY strftime('%Y-%m-%d %H:%M:%S', substr(start_time, 7, 4) || '-' || substr(start_time, 4, 2) || '-' || substr(start_time, 1, 2) || ' ' || substr(start_time, 12, 8));
"""
c.execute(command)
else:
command = "SELECT * FROM scenarios ORDER BY ?;"
c.execute(command, (sort_by,))
result = c.fetchall()
return result
def get_all_scenarios_and_check_completed(sort_by="start_time"):
with sqlite3.connect(scenario_db_file_location) as _conn:
_conn.row_factory = sqlite3.Row
_c = _conn.cursor()
if sort_by == "start_time":
command = """
SELECT * FROM scenarios
ORDER BY strftime('%Y-%m-%d %H:%M:%S', substr(start_time, 7, 4) || '-' || substr(start_time, 4, 2) || '-' || substr(start_time, 1, 2) || ' ' || substr(start_time, 12, 8));
"""
_c.execute(command)
else:
command = "SELECT * FROM scenarios ORDER BY ?;"
_c.execute(command, (sort_by,))
result = _c.fetchall()
for scenario in result:
if scenario["status"] == "running":
if check_scenario_federation_completed(scenario["name"]):
scenario_set_status_to_completed(scenario["name"])
result = get_all_scenarios()
return result
def scenario_update_record(scenario_name, start_time, end_time, title, description, status, network_subnet, model, dataset, rounds, role):
_conn = sqlite3.connect(scenario_db_file_location)
_c = _conn.cursor()
command = "SELECT * FROM scenarios WHERE name = ?;"
_c.execute(command, (scenario_name,))
result = _c.fetchone()
if result is None:
# Create a new record
_c.execute("INSERT INTO scenarios VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", (scenario_name, start_time, end_time, title, description, status, network_subnet, model, dataset, rounds, role))
else:
# Update the record
command = "UPDATE scenarios SET start_time = ?, end_time = ?, title = ?, description = ?, status = ?, network_subnet = ?, model = ?, dataset = ?, rounds = ?, role = ? WHERE name = ?;"
_c.execute(command, (start_time, end_time, title, description, status, network_subnet, model, dataset, rounds, role, scenario_name))
_conn.commit()
_conn.close()
def scenario_set_all_status_to_finished():
# Set all running scenarios to finished and update the end_time to the current time
_conn = sqlite3.connect(scenario_db_file_location)
_c = _conn.cursor()
command = "UPDATE scenarios SET status = 'finished', end_time = ? WHERE status = 'running';"
current_time = str(datetime.datetime.now())
_c.execute(command, (current_time,))
_conn.commit()
_conn.close()
def scenario_set_status_to_finished(scenario_name):
_conn = sqlite3.connect(scenario_db_file_location)
_c = _conn.cursor()
command = "UPDATE scenarios SET status = 'finished', end_time = ? WHERE name = ?;"
current_time = str(datetime.datetime.now())
_c.execute(command, (current_time, scenario_name))
_conn.commit()
_conn.close()
def scenario_set_status_to_completed(scenario_name):
try:
with sqlite3.connect(scenario_db_file_location) as _conn:
_c = _conn.cursor()
command = "UPDATE scenarios SET status = 'completed' WHERE name = ?;"
_c.execute(command, (scenario_name,))
_conn.commit()
_conn.close()
except sqlite3.Error as e:
print(f"Database error: {e}")
def get_running_scenario():
with sqlite3.connect(scenario_db_file_location) as conn:
conn.row_factory = sqlite3.Row
c = conn.cursor()
command = "SELECT * FROM scenarios WHERE status = ? OR status = ?;"
c.execute(command, ("running", "completed"))
result = c.fetchone()
return result
def get_completed_scenario():
with sqlite3.connect(scenario_db_file_location) as conn:
conn.row_factory = sqlite3.Row
c = conn.cursor()
command = "SELECT * FROM scenarios WHERE status = ?;"
c.execute(command, ("completed",))
result = c.fetchone()
return result
def get_scenario_by_name(scenario_name):
_conn = sqlite3.connect(scenario_db_file_location)
_c = _conn.cursor()
command = "SELECT * FROM scenarios WHERE name = ?;"
_c.execute(command, (scenario_name,))
result = _c.fetchone()
_conn.commit()
_conn.close()
return result
def remove_scenario_by_name(scenario_name):
_conn = sqlite3.connect(scenario_db_file_location)
_c = _conn.cursor()
command = "DELETE FROM scenarios WHERE name = ?;"
_c.execute(command, (scenario_name,))
_conn.commit()
_conn.close()
def check_scenario_federation_completed(scenario_name):
try:
# Connect to the scenario database to get the total rounds for the scenario
with sqlite3.connect(scenario_db_file_location) as conn:
conn.row_factory = sqlite3.Row
c = conn.cursor()
c.execute("SELECT rounds FROM scenarios WHERE name = ?;", (scenario_name,))
scenario = c.fetchone()
if not scenario:
raise ValueError(f"Scenario '{scenario_name}' not found.")
total_rounds = scenario["rounds"]
# Connect to the node database to check the rounds for each node
with sqlite3.connect(node_db_file_location) as conn:
conn.row_factory = sqlite3.Row
c = conn.cursor()
c.execute("SELECT round FROM nodes WHERE scenario = ?;", (scenario_name,))
nodes = c.fetchall()
if len(nodes) == 0:
return False
# Check if all nodes have completed the total rounds
return all(node["round"] == total_rounds for node in nodes)
except sqlite3.Error as e:
print(f"Database error: {e}")
return False
except Exception as e:
print(f"An error occurred: {e}")
return False
def check_scenario_with_role(role, scenario_name):
_conn = sqlite3.connect(scenario_db_file_location)
_c = _conn.cursor()
command = "SELECT * FROM scenarios WHERE role = ? AND name = ?;"
_c.execute(command, (role, scenario_name))
result = _c.fetchone()
_conn.commit()
_conn.close()
return result
def save_notes(scenario, notes):
try:
with sqlite3.connect(notes_db_file_location) as conn:
c = conn.cursor()
c.execute(
"""
INSERT INTO notes (scenario, scenario_notes) VALUES (?, ?)
ON CONFLICT(scenario) DO UPDATE SET scenario_notes = excluded.scenario_notes;
""",
(scenario, notes),
)
conn.commit()
except sqlite3.IntegrityError as e:
print(f"SQLite integrity error: {e}")
except sqlite3.Error as e:
print(f"SQLite error: {e}")
def get_notes(scenario):
with sqlite3.connect(notes_db_file_location) as conn:
conn.row_factory = sqlite3.Row
c = conn.cursor()
c.execute("SELECT * FROM notes WHERE scenario = ?;", (scenario,))
result = c.fetchone()
return result
def remove_note(scenario):
with sqlite3.connect(notes_db_file_location) as conn:
c = conn.cursor()
c.execute("DELETE FROM notes WHERE scenario = ?;", (scenario,))
conn.commit()
if __name__ == "__main__":
print(list_users())
| 16,617 | Python | .py | 390 | 33.448718 | 226 | 0.602303 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,935 | mobility.py | enriquetomasmb_nebula/nebula/addons/mobility.py | import asyncio
import logging
import random
import math
import time
from nebula.addons.functions import print_msg_box
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from nebula.core.network.communications import CommunicationsManager
class Mobility:
def __init__(self, config, cm: "CommunicationsManager"):
logging.info(f"Starting mobility module...")
self.config = config
self.cm = cm
self.grace_time = self.config.participant["mobility_args"]["grace_time_mobility"]
self.period = self.config.participant["mobility_args"]["change_geo_interval"]
self.mobility = self.config.participant["mobility_args"]["mobility"]
self.mobility_type = self.config.participant["mobility_args"]["mobility_type"]
self.radius_federation = float(self.config.participant["mobility_args"]["radius_federation"])
self.scheme_mobility = self.config.participant["mobility_args"]["scheme_mobility"]
self.round_frequency = int(self.config.participant["mobility_args"]["round_frequency"])
# Protocol to change connections based on distance
self.max_distance_with_direct_connections = 300 # meters
self.max_movement_random_strategy = 100 # meters
self.max_movement_nearest_strategy = 100 # meters
self.max_initiate_approximation = self.max_distance_with_direct_connections * 1.2
# Network conditions based on distance
self.network_conditions = {100: {"bandwidth": "5Gbps", "delay": "5ms"}, 200: {"bandwidth": "2Gbps", "delay": "50ms"}, 300: {"bandwidth": "100Mbps", "delay": "200ms"}, float("inf"): {"bandwidth": "10Mbps", "delay": "1000ms"}}
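        # Keys are upper distance bounds in meters: the first threshold greater than the current
        # distance selects the conditions (e.g. a 150 m link falls into the 200 m bucket and gets
        # 2Gbps / 50ms); float("inf") covers everything beyond 300 m.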
# Current network conditions of each connection {addr: {bandwidth: "5Gbps", delay: "0ms"}}
self.current_network_conditions = {}
# Logging box with mobility information
mobility_msg = f"Mobility: {self.mobility}\nMobility type: {self.mobility_type}\nRadius federation: {self.radius_federation}\nScheme mobility: {self.scheme_mobility}\nEach {self.round_frequency} rounds"
print_msg_box(msg=mobility_msg, indent=2, title="Mobility information")
@property
def round(self):
return self.cm.get_round()
async def start(self):
asyncio.create_task(self.run_mobility())
async def run_mobility(self):
if not self.mobility:
return
await asyncio.sleep(self.grace_time)
while True:
await self.change_geo_location()
await self.change_connections_based_on_distance()
await asyncio.sleep(self.period)
async def change_geo_location_random_strategy(self, latitude, longitude):
logging.info(f"游늸 Changing geo location randomly")
# radius_in_degrees = self.radius_federation / 111000
max_radius_in_degrees = self.max_movement_random_strategy / 111000
radius = random.uniform(0, max_radius_in_degrees)
angle = random.uniform(0, 2 * math.pi)
latitude += radius * math.cos(angle)
longitude += radius * math.sin(angle)
await self.set_geo_location(latitude, longitude)
async def change_geo_location_nearest_neighbor_strategy(self, distance, latitude, longitude, neighbor_latitude, neighbor_longitude):
logging.info(f"游늸 Changing geo location towards the nearest neighbor")
scale_factor = min(1, self.max_movement_nearest_strategy / distance)
# Calcular el 치ngulo hacia el vecino
angle = math.atan2(neighbor_longitude - longitude, neighbor_latitude - latitude)
# Conversi칩n de movimiento m치ximo a grados
max_lat_change = self.max_movement_nearest_strategy / 111000 # Cambio en grados para latitud
max_lon_change = self.max_movement_nearest_strategy / (111000 * math.cos(math.radians(latitude))) # Cambio en grados para longitud
# Aplicar escala y direcci칩n
delta_lat = max_lat_change * math.cos(angle) * scale_factor
delta_lon = max_lon_change * math.sin(angle) * scale_factor
# Actualizar latitud y longitud
new_latitude = latitude + delta_lat
new_longitude = longitude + delta_lon
await self.set_geo_location(new_latitude, new_longitude)
async def set_geo_location(self, latitude, longitude):
if latitude < -90 or latitude > 90 or longitude < -180 or longitude > 180:
# If the new location is out of bounds, we keep the old location
latitude = self.config.participant["mobility_args"]["latitude"]
longitude = self.config.participant["mobility_args"]["longitude"]
self.config.participant["mobility_args"]["latitude"] = latitude
self.config.participant["mobility_args"]["longitude"] = longitude
logging.info(f"游늸 New geo location: {latitude}, {longitude}")
async def change_geo_location(self):
if self.mobility and (self.mobility_type == "topology" or self.mobility_type == "both"):
random.seed(time.time() + self.config.participant["device_args"]["idx"])
latitude = float(self.config.participant["mobility_args"]["latitude"])
longitude = float(self.config.participant["mobility_args"]["longitude"])
direct_connections = await self.cm.get_direct_connections()
undirect_connection = await self.cm.get_undirect_connections()
if len(undirect_connection) > len(direct_connections):
logging.info(f"游늸 Undirect Connections is higher than Direct Connections")
# Get neighbor closer to me
selected_neighbor = await self.cm.get_nearest_connections(top=1)
logging.info(f"游늸 Selected neighbor: {selected_neighbor}")
try:
neighbor_latitude, neighbor_longitude = selected_neighbor.get_geolocation()
distance = selected_neighbor.get_neighbor_distance()
if distance > self.max_initiate_approximation:
# If the distance is too big, we move towards the neighbor
await self.change_geo_location_nearest_neighbor_strategy(distance, latitude, longitude, neighbor_latitude, neighbor_longitude)
else:
await self.change_geo_location_random_strategy(latitude, longitude)
except Exception as e:
logging.info(f"游늸 Neighbor location/distance not found for {selected_neighbor.get_addr()}: {e}")
await self.change_geo_location_random_strategy(latitude, longitude)
else:
await self.change_geo_location_random_strategy(latitude, longitude)
else:
logging.error(f"游늸 Mobility type {self.mobility_type} not implemented")
return
async def change_connections_based_on_distance(self):
if self.mobility and (self.mobility_type == "topology" or self.mobility_type == "both"):
try:
# logging.info(f"游늸 Checking connections based on distance")
connections_topology = await self.cm.get_addrs_current_connections()
# logging.info(f"游늸 Connections of the topology: {connections_topology}")
if len(connections_topology) < 1:
# logging.error(f"游늸 Not enough connections for mobility")
return
# Nodes that are too far away should be marked as undirected connections, and closer nodes should be marked as directed connections.
for addr in connections_topology:
distance = self.cm.connections[addr].get_neighbor_distance()
if distance is None:
# If the distance is not found, we skip the node
continue
# logging.info(f"游늸 Distance to node {addr}: {distance}")
if not self.cm.connections[addr].get_direct() and distance < self.max_distance_with_direct_connections:
logging.info(f"游늸 Node {addr} is close enough [{distance}], adding to direct connections")
self.cm.connections[addr].set_direct(True)
else:
# 10% margin to avoid oscillations
if self.cm.connections[addr].get_direct() and distance > self.max_distance_with_direct_connections * 1.1:
logging.info(f"游늸 Node {addr} is too far away [{distance}], removing from direct connections")
self.cm.connections[addr].set_direct(False)
# Adapt network conditions of the connection based on distance
for threshold in sorted(self.network_conditions.keys()):
if distance < threshold:
conditions = self.network_conditions[threshold]
break
# Only update the network conditions if they have changed
if addr not in self.current_network_conditions or self.current_network_conditions[addr] != conditions:
# eth1 is the interface of the container that connects to the node network - eth0 is the interface of the container that connects to the frontend/backend
self.cm._set_network_conditions(interface="eth1", network=addr.split(":")[0], bandwidth=conditions["bandwidth"], delay=conditions["delay"], delay_distro="10ms", delay_distribution="normal", loss="0%", duplicate="0%", corrupt="0%", reordering="0%")
self.current_network_conditions[addr] = conditions
except KeyError as e:
# Except when self.cm.connections[addr] is not found (disconnected during the process)
logging.error(f"游늸 Connection {addr} not found: {e}")
return
except Exception as e:
logging.error(f"游늸 Error changing connections based on distance: {e}")
return
async def change_connections(self):
if self.mobility and (self.mobility_type == "topology" or self.mobility_type == "both") and self.round % self.round_frequency == 0:
logging.info(f"游늸 Changing connections")
current_connections = await self.cm.get_addrs_current_connections(only_direct=True)
potential_connections = await self.cm.get_addrs_current_connections(only_undirected=True)
logging.info(f"游늸 Current connections: {current_connections} | Potential future connections: {potential_connections}")
if len(current_connections) < 1 or len(potential_connections) < 1:
logging.error(f"游늸 Not enough connections for mobility")
return
if self.scheme_mobility == "random":
random_neighbor = random.choice(current_connections)
random_potential_neighbor = random.choice(potential_connections)
logging.info(f"游늸 Selected node(s) to disconnect: {random_neighbor}")
logging.info(f"游늸 Selected node(s) to connect: {random_potential_neighbor}")
await self.cm.disconnect(random_neighbor, mutual_disconnection=True)
await self.cm.connect(random_potential_neighbor, direct=True)
logging.info(f"游늸 New connections: {self.get_current_connections(only_direct=True)}")
logging.info(f"游늸 Neighbors in config: {self.config.participant['network_args']['neighbors']}")
else:
logging.error(f"游늸 Mobility scheme {self.scheme_mobility} not implemented")
return
| 11,702 | Python | .py | 168 | 55.982143 | 271 | 0.646315 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,936 | env.py | enriquetomasmb_nebula/nebula/addons/env.py | import logging
import os
import re
import sys
import platform
import requests
from nebula import __version__
def check_version():
# Check version of NEBULA (__version__ is defined in __init__.py) and compare with __version__ in https://raw.githubusercontent.com/enriquetomasmb/nebula/main/nebula/__init__.py
logging.info("Checking NEBULA version...")
try:
r = requests.get("https://raw.githubusercontent.com/enriquetomasmb/nebula/main/nebula/__init__.py")
if r.status_code == 200:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', r.text, re.MULTILINE).group(1)
if version != __version__:
logging.info(f"Your NEBULA version is {__version__} and the latest version is {version}. Please update your NEBULA version.")
logging.info("You can update your NEBULA version downloading the latest version from https://github.com/enriquetomasmb/nebula")
sys.exit(0)
else:
logging.info(f"Your NEBULA version is {__version__} and it is the latest version.")
except Exception as e:
logging.error(f"Error while checking NEBULA version: {e}")
sys.exit(0)
def check_environment():
logging.info(f"NEBULA Platform version: {__version__}")
# check_version()
logging.info("======== Running Environment ========")
logging.info("OS: " + platform.platform())
logging.info("Hardware: " + platform.machine())
logging.info("Python version: " + sys.version)
try:
import torch
logging.info("PyTorch version: " + torch.__version__)
except ImportError:
logging.info("PyTorch is not installed properly")
except Exception:
pass
logging.info("======== CPU Configuration ========")
try:
import psutil
load1, load5, load15 = psutil.getloadavg()
cpu_usage = (load15 / os.cpu_count()) * 100
logging.info("The CPU usage is : {:.0f}%".format(cpu_usage))
logging.info(
"Available CPU Memory: {:.1f} G / {}G".format(
psutil.virtual_memory().available / 1024 / 1024 / 1024,
psutil.virtual_memory().total / 1024 / 1024 / 1024,
)
)
except ImportError:
logging.info("No CPU information available")
except Exception:
pass
if sys.platform == "win32" or sys.platform == "linux":
logging.info("======== GPU Configuration ========")
try:
import pynvml
pynvml.nvmlInit()
devices = pynvml.nvmlDeviceGetCount()
for i in range(devices):
handle = pynvml.nvmlDeviceGetHandleByIndex(i)
gpu_percent = pynvml.nvmlDeviceGetUtilizationRates(handle).gpu
gpu_temp = pynvml.nvmlDeviceGetTemperature(handle, pynvml.NVML_TEMPERATURE_GPU)
gpu_mem = pynvml.nvmlDeviceGetMemoryInfo(handle)
gpu_mem_percent = gpu_mem.used / gpu_mem.total * 100
gpu_power = pynvml.nvmlDeviceGetPowerUsage(handle) / 1000.0
gpu_clocks = pynvml.nvmlDeviceGetClockInfo(handle, pynvml.NVML_CLOCK_SM)
gpu_memory_clocks = pynvml.nvmlDeviceGetClockInfo(handle, pynvml.NVML_CLOCK_MEM)
gpu_utilization = pynvml.nvmlDeviceGetUtilizationRates(handle)
gpu_fan_speed = pynvml.nvmlDeviceGetFanSpeed(handle)
logging.info(f"GPU{i} percent: {gpu_percent}")
logging.info(f"GPU{i} temp: {gpu_temp}")
logging.info(f"GPU{i} mem percent: {gpu_mem_percent}")
logging.info(f"GPU{i} power: {gpu_power}")
logging.info(f"GPU{i} clocks: {gpu_clocks}")
logging.info(f"GPU{i} memory clocks: {gpu_memory_clocks}")
logging.info(f"GPU{i} utilization: {gpu_utilization.gpu}")
logging.info(f"GPU{i} fan speed: {gpu_fan_speed}")
except ImportError:
logging.info("pynvml module not found, GPU information unavailable")
except Exception:
pass
else:
logging.info("GPU information unavailable")
| 4,151 | Python | .py | 84 | 39.214286 | 181 | 0.613468 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,937 | functions.py | enriquetomasmb_nebula/nebula/addons/functions.py | import logging
def print_msg_box(msg, indent=1, width=None, title=None, logger_name=None):
"""Print message-box with optional title."""
if logger_name:
logger = logging.getLogger(logger_name)
else:
logger = logging.getLogger()
if not isinstance(msg, str):
raise TypeError("msg parameter must be a string")
lines = msg.split("\n")
space = " " * indent
if not width:
width = max(map(len, lines))
if title:
width = max(width, len(title))
box = f'\n╔{"═" * (width + indent * 2)}╗\n' # upper_border
if title:
if not isinstance(title, str):
raise TypeError("title parameter must be a string")
box += f"║{space}{title:<{width}}{space}║\n" # title
box += f'║{space}{"-" * len(title):<{width}}{space}║\n' # underscore
box += "".join([f"║{space}{line:<{width}}{space}║\n" for line in lines])
box += f'╚{"═" * (width + indent * 2)}╝' # lower_border
logger.info(box)
| 1,031 | Python | .py | 24 | 34.791667 | 77 | 0.579815 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,938 | topologymanager.py | enriquetomasmb_nebula/nebula/addons/topologymanager.py | import random
import logging
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use("Agg")
plt.switch_backend("Agg")
import networkx as nx
import numpy as np
from nebula.core.role import Role
class TopologyManager:
def __init__(
self,
scenario_name=None,
n_nodes=5,
b_symmetric=True,
undirected_neighbor_num=5,
topology=None,
):
self.scenario_name = scenario_name
if topology is None:
topology = []
self.n_nodes = n_nodes
self.b_symmetric = b_symmetric
self.undirected_neighbor_num = undirected_neighbor_num
self.topology = topology
# Initialize nodes with array of tuples (0,0,0) with size n_nodes
self.nodes = np.zeros((n_nodes, 3), dtype=np.int32)
self.b_fully_connected = False
if self.undirected_neighbor_num < 2:
raise ValueError("undirected_neighbor_num must be greater than 2")
# If the number of neighbors is larger than the number of nodes, then the topology is fully connected
if self.undirected_neighbor_num >= self.n_nodes - 1 and self.b_symmetric:
self.b_fully_connected = True
def __getstate__(self):
# Return the attributes of the class that should be serialized
return {
"scenario_name": self.scenario_name,
"n_nodes": self.n_nodes,
"topology": self.topology,
"nodes": self.nodes,
}
def __setstate__(self, state):
# Set the attributes of the class from the serialized state
self.scenario_name = state["scenario_name"]
self.n_nodes = state["n_nodes"]
self.topology = state["topology"]
self.nodes = state["nodes"]
def draw_graph(self, plot=False, path=None):
g = nx.from_numpy_array(self.topology)
# pos = nx.layout.spectral_layout(g)
# pos = nx.spring_layout(g, pos=pos, iterations=50)
pos = nx.spring_layout(g, k=0.15, iterations=20, seed=42)
fig = plt.figure(num="Network topology", dpi=100, figsize=(6, 6), frameon=False)
ax = fig.add_axes([0, 0, 1, 1])
ax.set_xlim([-1.3, 1.3])
ax.set_ylim([-1.3, 1.3])
# ax.axis('off')
labels = {}
color_map = []
server = False
for k in range(self.n_nodes):
if str(self.nodes[k][2]) == Role.AGGREGATOR:
color_map.append("orange")
elif str(self.nodes[k][2]) == Role.SERVER:
server = True
color_map.append("green")
elif str(self.nodes[k][2]) == Role.TRAINER:
color_map.append("#6182bd")
elif str(self.nodes[k][2]) == Role.PROXY:
color_map.append("purple")
else:
color_map.append("red")
labels[k] = f"P{k}\n" + str(self.nodes[k][0]) + ":" + str(self.nodes[k][1])
# nx.draw_networkx_nodes(g, pos_shadow, node_color='k', alpha=0.5)
nx.draw_networkx_nodes(g, pos, node_color=color_map, linewidths=2)
nx.draw_networkx_labels(g, pos, labels, font_size=10, font_weight="bold")
nx.draw_networkx_edges(g, pos, width=2)
# plt.margins(0.0)
roles = [str(i[2]) for i in self.nodes]
if Role.AGGREGATOR in roles:
plt.scatter([], [], c="orange", label="Aggregator")
if Role.SERVER in roles:
plt.scatter([], [], c="green", label="Server")
if Role.TRAINER in roles:
plt.scatter([], [], c="#6182bd", label="Trainer")
if Role.PROXY in roles:
plt.scatter([], [], c="purple", label="Proxy")
if Role.IDLE in roles:
plt.scatter([], [], c="red", label="Idle")
# plt.scatter([], [], c="green", label='Central Server')
# plt.scatter([], [], c="orange", label='Aggregator')
# plt.scatter([], [], c="#6182bd", label='Trainer')
# plt.scatter([], [], c="purple", label='Proxy')
# plt.scatter([], [], c="red", label='Idle')
plt.legend()
# import sys
# if path is None:
# if not os.path.exists(f"{sys.path[0]}/logs/{self.scenario_name}"):
# os.makedirs(f"{sys.path[0]}/logs/{self.scenario_name}")
# plt.savefig(f"{sys.path[0]}/logs/{self.scenario_name}/topology.png", dpi=100, bbox_inches="tight", pad_inches=0)
# else:
plt.savefig(f"{path}", dpi=100, bbox_inches="tight", pad_inches=0)
# plt.gcf().canvas.draw()
plt.close()
def generate_topology(self):
if self.b_fully_connected:
self.__fully_connected()
return
if self.b_symmetric:
self.__randomly_pick_neighbors_symmetric()
else:
self.__randomly_pick_neighbors_asymmetric()
def generate_server_topology(self):
self.topology = np.zeros((self.n_nodes, self.n_nodes), dtype=np.float32)
self.topology[0, :] = 1
self.topology[:, 0] = 1
np.fill_diagonal(self.topology, 0)
def generate_ring_topology(self, increase_convergence=False):
self.__ring_topology(increase_convergence=increase_convergence)
def generate_custom_topology(self, topology):
self.topology = topology
def get_matrix_adjacency_from_neighbors(self, neighbors):
matrix_adjacency = np.zeros((self.n_nodes, self.n_nodes), dtype=np.float32)
for i in range(self.n_nodes):
for j in range(self.n_nodes):
if i in neighbors[j]:
matrix_adjacency[i, j] = 1
return matrix_adjacency
def get_topology(self):
        return self.topology
def get_nodes(self):
return self.nodes
@staticmethod
def get_coordinates(random_geo=True):
if random_geo:
if random.randint(0, 1) == 0:
                # Spain
bounds = (36.0, 43.0, -9.0, 3.3) # min_lat, max_lat, min_lon, max_lon
else:
                # Switzerland
bounds = (45.8, 47.8, 5.9, 10.5) # min_lat, max_lat, min_lon, max_lon
min_latitude, max_latitude, min_longitude, max_longitude = bounds
latitude = random.uniform(min_latitude, max_latitude)
longitude = random.uniform(min_longitude, max_longitude)
return latitude, longitude
def add_nodes(self, nodes):
self.nodes = nodes
def update_nodes(self, config_participants):
self.nodes = config_participants
def get_node(self, node_idx):
return self.nodes[node_idx]
def get_neighbors_string(self, node_idx):
# logging.info(f"Topology: {self.topology}")
# logging.info(f"Nodes: {self.nodes}")
neighbors_data = []
for i, node in enumerate(self.topology[node_idx]):
if node == 1:
neighbors_data.append(self.nodes[i])
neighbors_data_strings = [f"{i[0]}:{i[1]}" for i in neighbors_data]
neighbors_data_string = " ".join(neighbors_data_strings)
logging.info(f"Neighbors of node participant_{node_idx}: {neighbors_data_string}")
return neighbors_data_string
def __ring_topology(self, increase_convergence=False):
topology_ring = np.array(
nx.to_numpy_matrix(nx.watts_strogatz_graph(self.n_nodes, 2, 0)),
dtype=np.float32,
)
if increase_convergence:
# Create random links between nodes in topology_ring
for i in range(self.n_nodes):
for j in range(self.n_nodes):
if topology_ring[i][j] == 0:
if random.random() < 0.1:
topology_ring[i][j] = 1
topology_ring[j][i] = 1
np.fill_diagonal(topology_ring, 0)
self.topology = topology_ring
def __randomly_pick_neighbors_symmetric(self):
# First generate a ring topology
topology_ring = np.array(
nx.to_numpy_matrix(nx.watts_strogatz_graph(self.n_nodes, 2, 0)),
dtype=np.float32,
)
np.fill_diagonal(topology_ring, 0)
# After, randomly add some links for each node (symmetric)
# If undirected_neighbor_num is X, then each node has X links to other nodes
k = int(self.undirected_neighbor_num)
topology_random_link = np.array(
nx.to_numpy_matrix(nx.watts_strogatz_graph(self.n_nodes, k, 0)),
dtype=np.float32,
)
# generate symmetric topology
topology_symmetric = topology_ring.copy()
for i in range(self.n_nodes):
for j in range(self.n_nodes):
if topology_symmetric[i][j] == 0 and topology_random_link[i][j] == 1:
topology_symmetric[i][j] = topology_random_link[i][j]
np.fill_diagonal(topology_symmetric, 0)
self.topology = topology_symmetric
def __randomly_pick_neighbors_asymmetric(self):
# randomly add some links for each node (symmetric)
k = self.undirected_neighbor_num
topology_random_link = np.array(
nx.to_numpy_matrix(nx.watts_strogatz_graph(self.n_nodes, k, 0)),
dtype=np.float32,
)
np.fill_diagonal(topology_random_link, 0)
# first generate a ring topology
topology_ring = np.array(
nx.to_numpy_matrix(nx.watts_strogatz_graph(self.n_nodes, 2, 0)),
dtype=np.float32,
)
np.fill_diagonal(topology_ring, 0)
for i in range(self.n_nodes):
for j in range(self.n_nodes):
if topology_ring[i][j] == 0 and topology_random_link[i][j] == 1:
topology_ring[i][j] = topology_random_link[i][j]
np.fill_diagonal(topology_ring, 0)
# randomly delete some links
out_link_set = set()
for i in range(self.n_nodes):
len_row_zero = 0
for j in range(self.n_nodes):
if topology_ring[i][j] == 0:
len_row_zero += 1
random_selection = np.random.randint(2, size=len_row_zero)
index_of_zero = 0
for j in range(self.n_nodes):
out_link = j * self.n_nodes + i
if topology_ring[i][j] == 0:
if random_selection[index_of_zero] == 1 and out_link not in out_link_set:
topology_ring[i][j] = 1
out_link_set.add(i * self.n_nodes + j)
index_of_zero += 1
np.fill_diagonal(topology_ring, 0)
self.topology = topology_ring
def __fully_connected(self):
topology_fully_connected = np.array(
nx.to_numpy_matrix(nx.watts_strogatz_graph(self.n_nodes, self.n_nodes - 1, 0)),
dtype=np.float32,
)
np.fill_diagonal(topology_fully_connected, 0)
for i in range(self.n_nodes):
for j in range(self.n_nodes):
if topology_fully_connected[i][j] != 1:
topology_fully_connected[i][j] = 1
np.fill_diagonal(topology_fully_connected, 0)
self.topology = topology_fully_connected
| 11,231 | Python | .py | 251 | 34.023904 | 125 | 0.577022 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,939 | reporter.py | enriquetomasmb_nebula/nebula/addons/reporter.py | import asyncio
import importlib
import json
import logging
import aiohttp
import sys
import psutil
import os
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from nebula.core.network.communications import CommunicationsManager
class Reporter:
def __init__(self, config, trainer, cm: "CommunicationsManager"):
logging.info(f"Starting reporter module")
self.config = config
self.trainer = trainer
self.cm = cm
self.frequency = self.config.participant["reporter_args"]["report_frequency"]
self.grace_time = self.config.participant["reporter_args"]["grace_time_reporter"]
self.data_queue = asyncio.Queue()
self.url = f'http://{self.config.participant["scenario_args"]["controller"]}/nebula/dashboard/{self.config.participant["scenario_args"]["name"]}/node/update'
self.counter = 0
self.first_net_metrics = True
self.prev_bytes_sent = 0
self.prev_bytes_recv = 0
self.prev_packets_sent = 0
self.prev_packets_recv = 0
self.acc_bytes_sent = 0
self.acc_bytes_recv = 0
self.acc_packets_sent = 0
self.acc_packets_recv = 0
async def enqueue_data(self, name, value):
await self.data_queue.put((name, value))
async def start(self):
await asyncio.sleep(self.grace_time)
asyncio.create_task(self.run_reporter())
async def run_reporter(self):
while True:
# NOTE: currently disabled
# if self.config.participant["scenario_args"]["controller"] != "nebula-test":
# await self.__report_status_to_controller()
# await self.__report_data_queue()
await self.__report_resources()
self.counter += 1
if self.counter % 50 == 0:
logging.info(f"Reloading config file...")
self.cm.engine.config.reload_config_file()
await asyncio.sleep(self.frequency)
async def report_scenario_finished(self):
url = f'http://{self.config.participant["scenario_args"]["controller"]}/nebula/dashboard/{self.config.participant["scenario_args"]["name"]}/node/done'
data = json.dumps({"idx": self.config.participant["device_args"]["idx"]})
headers = {
"Content-Type": "application/json",
"User-Agent": f'NEBULA Participant {self.config.participant["device_args"]["idx"]}',
}
try:
async with aiohttp.ClientSession() as session:
async with session.post(url, data=data, headers=headers) as response:
if response.status != 200:
logging.error(f"Error received from controller: {response.status} (probably there is overhead in the controller, trying again in the next round)")
text = await response.text()
logging.debug(text)
else:
logging.info(f"Participant {self.config.participant['device_args']['idx']} reported scenario finished")
return True
except aiohttp.ClientError as e:
logging.error(f"Error connecting to the controller at {url}: {e}")
return False
async def __report_data_queue(self):
while not self.data_queue.empty():
name, value = await self.data_queue.get()
await self.trainer.logger.log_data({name: value}) # Assuming log_data can be made async
self.data_queue.task_done()
async def __report_status_to_controller(self):
try:
async with aiohttp.ClientSession() as session:
async with session.post(
self.url,
data=json.dumps(self.config.participant),
headers={
"Content-Type": "application/json",
"User-Agent": f'NEBULA Participant {self.config.participant["device_args"]["idx"]}',
},
) as response:
if response.status != 200:
logging.error(f"Error received from controller: {response.status} (probably there is overhead in the controller, trying again in the next round)")
text = await response.text()
logging.debug(text)
except aiohttp.ClientError as e:
logging.error(f"Error connecting to the controller at {self.url}: {e}")
except Exception as e:
logging.error(f"Error sending status to controller, will try again in a few seconds: {e}")
await asyncio.sleep(5)
async def __report_resources(self):
cpu_percent = psutil.cpu_percent()
cpu_temp = 0
try:
if sys.platform == "linux":
sensors = await asyncio.to_thread(psutil.sensors_temperatures)
cpu_temp = sensors.get("coretemp")[0].current if sensors.get("coretemp") else 0
except Exception as e:
pass
pid = os.getpid()
cpu_percent_process = await asyncio.to_thread(psutil.Process(pid).cpu_percent, interval=1)
process = psutil.Process(pid)
memory_process = await asyncio.to_thread(lambda: process.memory_info().rss / (1024**2))
memory_percent_process = process.memory_percent()
memory_info = await asyncio.to_thread(psutil.virtual_memory)
memory_percent = memory_info.percent
memory_used = memory_info.used / (1024**2)
disk_percent = psutil.disk_usage("/").percent
net_io_counters = await asyncio.to_thread(psutil.net_io_counters)
bytes_sent = net_io_counters.bytes_sent
bytes_recv = net_io_counters.bytes_recv
packets_sent = net_io_counters.packets_sent
packets_recv = net_io_counters.packets_recv
if self.first_net_metrics:
bytes_sent_diff = 0
bytes_recv_diff = 0
packets_sent_diff = 0
packets_recv_diff = 0
self.first_net_metrics = False
else:
bytes_sent_diff = bytes_sent - self.prev_bytes_sent
bytes_recv_diff = bytes_recv - self.prev_bytes_recv
packets_sent_diff = packets_sent - self.prev_packets_sent
packets_recv_diff = packets_recv - self.prev_packets_recv
self.prev_bytes_sent = bytes_sent
self.prev_bytes_recv = bytes_recv
self.prev_packets_sent = packets_sent
self.prev_packets_recv = packets_recv
self.acc_bytes_sent += bytes_sent_diff
self.acc_bytes_recv += bytes_recv_diff
self.acc_packets_sent += packets_sent_diff
self.acc_packets_recv += packets_recv_diff
current_connections = await self.cm.get_addrs_current_connections(only_direct=True)
resources = {
"CPU/CPU global (%)": cpu_percent,
"CPU/CPU process (%)": cpu_percent_process,
"CPU/CPU temperature (°)": cpu_temp,
"RAM/RAM global (%)": memory_percent,
"RAM/RAM global (MB)": memory_used,
"RAM/RAM process (%)": memory_percent_process,
"RAM/RAM process (MB)": memory_process,
"Disk/Disk (%)": disk_percent,
"Network/Network (bytes sent)": round(self.acc_bytes_sent / (1024 ** 2), 3),
"Network/Network (bytes received)": round(self.acc_bytes_recv / (1024 ** 2), 3),
"Network/Network (packets sent)": self.acc_packets_sent,
"Network/Network (packets received)": self.acc_packets_recv,
"Network/Connections": len(current_connections),
}
self.trainer.logger.log_data(resources)
if importlib.util.find_spec("pynvml") is not None:
try:
import pynvml
await asyncio.to_thread(pynvml.nvmlInit)
devices = await asyncio.to_thread(pynvml.nvmlDeviceGetCount)
for i in range(devices):
handle = await asyncio.to_thread(pynvml.nvmlDeviceGetHandleByIndex, i)
gpu_percent = (await asyncio.to_thread(pynvml.nvmlDeviceGetUtilizationRates, handle)).gpu
gpu_temp = await asyncio.to_thread(pynvml.nvmlDeviceGetTemperature, handle, pynvml.NVML_TEMPERATURE_GPU)
gpu_mem = await asyncio.to_thread(pynvml.nvmlDeviceGetMemoryInfo, handle)
gpu_mem_percent = round(gpu_mem.used / gpu_mem.total * 100, 3)
gpu_power = await asyncio.to_thread(pynvml.nvmlDeviceGetPowerUsage, handle) / 1000.0
gpu_clocks = await asyncio.to_thread(pynvml.nvmlDeviceGetClockInfo, handle, pynvml.NVML_CLOCK_SM)
gpu_memory_clocks = await asyncio.to_thread(pynvml.nvmlDeviceGetClockInfo, handle, pynvml.NVML_CLOCK_MEM)
gpu_fan_speed = await asyncio.to_thread(pynvml.nvmlDeviceGetFanSpeed, handle)
gpu_info = {
f"GPU/GPU{i} (%)": gpu_percent,
f"GPU/GPU{i} temperature (°)": gpu_temp,
f"GPU/GPU{i} memory (%)": gpu_mem_percent,
f"GPU/GPU{i} power": gpu_power,
f"GPU/GPU{i} clocks": gpu_clocks,
f"GPU/GPU{i} memory clocks": gpu_memory_clocks,
f"GPU/GPU{i} fan speed": gpu_fan_speed,
}
self.trainer.logger.log_data(gpu_info)
except Exception:
pass
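# Minimal, self-contained sketch (a hypothetical helper, not used by the class above) of the
# counter-diffing pattern applied to the network metrics: psutil counters are cumulative since
# boot, so per-report deltas are obtained by subtracting the previously stored snapshot.
def _net_io_delta_sketch(prev_snapshot=None):
    import psutil
    snapshot = psutil.net_io_counters()
    if prev_snapshot is None:
        # first call: no previous sample, so report zero deltas (mirrors the first_net_metrics flag)
        delta = {"bytes_sent": 0, "bytes_recv": 0, "packets_sent": 0, "packets_recv": 0}
    else:
        delta = {
            "bytes_sent": snapshot.bytes_sent - prev_snapshot.bytes_sent,
            "bytes_recv": snapshot.bytes_recv - prev_snapshot.bytes_recv,
            "packets_sent": snapshot.packets_sent - prev_snapshot.packets_sent,
            "packets_recv": snapshot.packets_recv - prev_snapshot.packets_recv,
        }
    return delta, snapshot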
| 9,520 | Python | .py | 179 | 40.223464 | 170 | 0.599957 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,940 | attacks.py | enriquetomasmb_nebula/nebula/addons/attacks/attacks.py | from typing import Any
import torch
import numpy as np
from torchmetrics.functional import pairwise_cosine_similarity
from copy import deepcopy
import logging
# To take into account:
# - Malicious nodes do not train on their own data
# - Malicious nodes aggregate the weights of the other nodes, but not their own
# - The received weights may be the node own weights (aggregated of neighbors), or
# if the attack is performed specifically for one of the neighbors, it can take
# its weights only (should be more effective if they are different).
def create_attack(attack_name):
"""
Function to create an attack object from its name.
"""
if attack_name == "GLLNeuronInversionAttack":
return GLLNeuronInversionAttack()
elif attack_name == "NoiseInjectionAttack":
return NoiseInjectionAttack()
elif attack_name == "SwappingWeightsAttack":
return SwappingWeightsAttack()
elif attack_name == "DelayerAttack":
return DelayerAttack()
else:
return None
class Attack:
def __call__(self, *args: Any, **kwds: Any) -> Any:
return self.attack(*args, **kwds)
def attack(self, received_weights):
"""
Function to perform the attack on the received weights. It should return the
attacked weights.
"""
raise NotImplementedError
class GLLNeuronInversionAttack(Attack):
"""
    Attack that performs a neuron inversion attack on the received weights.
"""
def __init__(self, strength=5.0, perc=1.0):
super().__init__()
self.strength = strength
self.perc = perc
def attack(self, received_weights):
logging.info("[GLLNeuronInversionAttack] Performing neuron inversion attack")
lkeys = list(received_weights.keys())
logging.info(f"Layer inverted: {lkeys[-2]}")
received_weights[lkeys[-2]].data = torch.rand(received_weights[lkeys[-2]].shape) * 10000
return received_weights
class NoiseInjectionAttack(Attack):
"""
    Attack that injects random noise into the received weights.
"""
def __init__(self, strength=10000, perc=1.0):
super().__init__()
self.strength = strength
self.perc = perc
def attack(self, received_weights):
logging.info("[NoiseInjectionAttack] Performing noise injection attack")
lkeys = list(received_weights.keys())
for k in lkeys:
logging.info(f"Layer noised: {k}")
received_weights[k].data += torch.randn(received_weights[k].shape) * self.strength
return received_weights
class SwappingWeightsAttack(Attack):
"""
    Attack that swaps (permutes) the weights of a chosen layer in the received weights. Note that
    the attack's effectiveness is not consistent due to its stochasticity.
    Warning: depending on the layer, the code may not work (due to reshaping in between),
    or it may be slow (it scales quadratically with the layer size).
    Do not apply it to the last layer, as that would make the attack detectable (high loss
    on the malicious node).
"""
def __init__(self, layer_idx=0):
super().__init__()
self.layer_idx = layer_idx
def attack(self, received_weights):
logging.info("[SwappingWeightsAttack] Performing swapping weights attack")
lkeys = list(received_weights.keys())
wm = received_weights[lkeys[self.layer_idx]]
# Compute similarity matrix
sm = torch.zeros((wm.shape[0], wm.shape[0]))
for j in range(wm.shape[0]):
sm[j] = pairwise_cosine_similarity(wm[j].reshape(1, -1), wm.reshape(wm.shape[0], -1))
# Check rows/cols where greedy approach is optimal
nsort = np.full(sm.shape[0], -1)
rows = []
for j in range(sm.shape[0]):
k = torch.argmin(sm[j])
if torch.argmin(sm[:, k]) == j:
nsort[j] = k
rows.append(j)
not_rows = np.array([i for i in range(sm.shape[0]) if i not in rows])
# Ensure the rest of the rows are fully permuted (not optimal, but good enough)
nrs = deepcopy(not_rows)
nrs = np.random.permutation(nrs)
while np.any(nrs == not_rows):
nrs = np.random.permutation(nrs)
nsort[not_rows] = nrs
nsort = torch.tensor(nsort)
# Apply permutation to weights
received_weights[lkeys[self.layer_idx]] = received_weights[lkeys[self.layer_idx]][nsort]
received_weights[lkeys[self.layer_idx + 1]] = received_weights[lkeys[self.layer_idx + 1]][nsort]
if self.layer_idx + 2 < len(lkeys):
received_weights[lkeys[self.layer_idx + 2]] = received_weights[lkeys[self.layer_idx + 2]][:, nsort]
return received_weights
class DelayerAttack(Attack):
"""
    Attack that delays the received weights: the first weights received are stored and then
    returned in every subsequent round, for an indefinite number of rounds.
"""
def __init__(self):
super().__init__()
self.weights = None
def attack(self, received_weights):
logging.info("[DelayerAttack] Performing delayer attack")
if self.weights is None:
self.weights = deepcopy(received_weights)
return self.weights
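# Minimal usage sketch (hypothetical weights; the dict below mimics a PyTorch state_dict of
# tensors): an attack object is created by name and applied to the received parameters.
if __name__ == "__main__":
    demo_weights = {
        "fc1.weight": torch.randn(8, 4),
        "fc1.bias": torch.randn(8),
        "fc2.weight": torch.randn(2, 8),
        "fc2.bias": torch.randn(2),
    }
    attack = create_attack("NoiseInjectionAttack")
    attacked_weights = attack(demo_weights)  # Attack.__call__ delegates to attack()
    print(attacked_weights["fc2.bias"])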
| 5,210 | Python | .py | 119 | 36.554622 | 111 | 0.662584 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,941 | datapoison.py | enriquetomasmb_nebula/nebula/addons/attacks/poisoning/datapoison.py | import copy
import random
import numpy as np
import torch
from skimage.util import random_noise
def datapoison(dataset, indices, poisoned_persent, poisoned_ratio, targeted=False, target_label=3, noise_type="salt"):
"""
Function to add random noise of various types to the dataset.
"""
new_dataset = copy.deepcopy(dataset)
train_data = new_dataset.data
targets = new_dataset.targets
num_indices = len(indices)
    if not isinstance(noise_type, str):
noise_type = noise_type[0]
if targeted == False:
num_poisoned = int(poisoned_persent * num_indices)
if num_indices == 0:
return new_dataset
if num_poisoned > num_indices:
return new_dataset
poisoned_indice = random.sample(indices, num_poisoned)
for i in poisoned_indice:
t = train_data[i]
if noise_type == "salt":
# Replaces random pixels with 1.
poisoned = torch.tensor(random_noise(t, mode=noise_type, amount=poisoned_ratio))
elif noise_type == "gaussian":
# Gaussian-distributed additive noise.
poisoned = torch.tensor(random_noise(t, mode=noise_type, mean=0, var=poisoned_ratio, clip=True))
elif noise_type == "s&p":
# Replaces random pixels with either 1 or low_val, where low_val is 0 for unsigned images or -1 for signed images.
poisoned = torch.tensor(random_noise(t, mode=noise_type, amount=poisoned_ratio))
elif noise_type == "nlp_rawdata":
# for NLP data, change the word vector to 0 with p=poisoned_ratio
poisoned = poison_to_nlp_rawdata(t, poisoned_ratio)
else:
print("ERROR: poison attack type not supported.")
poisoned = t
train_data[i] = poisoned
else:
for i in indices:
if int(targets[i]) == int(target_label):
t = train_data[i]
poisoned = add_x_to_image(t)
train_data[i] = poisoned
new_dataset.data = train_data
return new_dataset
def add_x_to_image(img):
"""
Add a 10*10 pixels X at the top-left of an image
"""
for i in range(0, 10):
for j in range(0, 10):
if i + j <= 9 or i == j:
img[i][j] = 255
return torch.tensor(img)
def poison_to_nlp_rawdata(text_data, poisoned_ratio):
"""
for NLP data, change the word vector to 0 with p=poisoned_ratio
"""
non_zero_vector_indice = [i for i in range(0, len(text_data)) if text_data[i][0] != 0]
non_zero_vector_len = len(non_zero_vector_indice)
num_poisoned_token = int(poisoned_ratio * non_zero_vector_len)
if num_poisoned_token == 0:
return text_data
if num_poisoned_token > non_zero_vector_len:
return text_data
poisoned_token_indice = random.sample(non_zero_vector_indice, num_poisoned_token)
zero_vector = torch.Tensor(np.zeros(len(text_data[0][0])))
for i in poisoned_token_indice:
text_data[i] = zero_vector
return text_data
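# Minimal usage sketch (the toy dataset below is hypothetical; only the .data and .targets
# attributes that datapoison() relies on are provided, with illustrative shapes):
if __name__ == "__main__":
    class _ToyDataset:
        def __init__(self):
            self.data = torch.rand(10, 28, 28)
            self.targets = torch.randint(0, 10, (10,))
    poisoned_set = datapoison(_ToyDataset(), indices=list(range(10)), poisoned_persent=0.5, poisoned_ratio=0.05, noise_type="salt")
    # the targeted branch stamps the X trigger produced by add_x_to_image on samples of target_label
    trigger = add_x_to_image(np.zeros((28, 28), dtype=np.uint8))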
| 3,110 | Python | .py | 73 | 33.780822 | 130 | 0.616325 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,942 | labelflipping.py | enriquetomasmb_nebula/nebula/addons/attacks/poisoning/labelflipping.py | import copy
import random
import torch
def labelFlipping(dataset, indices, poisoned_persent=0, targeted=False, target_label=4, target_changed_label=7):
"""
    Select poisoned_persent of the labels and change them to random values or, in the targeted
    case, relabel every sample of target_label as target_changed_label.
    Args:
        dataset: The training dataset, torch.utils.data.Dataset-like.
        indices: Indices of the subset to poison, list-like.
        poisoned_persent: The ratio of labels to change, float.
        targeted: If True, only flip target_label into target_changed_label.
        target_label: The original label to flip in the targeted case.
        target_changed_label: The label assigned to those samples in the targeted case.
"""
new_dataset = copy.deepcopy(dataset)
targets = new_dataset.targets.detach().clone()
num_indices = len(indices)
# classes = new_dataset.classes
# class_to_idx = new_dataset.class_to_idx
# class_list = [class_to_idx[i] for i in classes]
    class_list = list(set(targets.tolist()))
if targeted == False:
num_flipped = int(poisoned_persent * num_indices)
if num_indices == 0:
return new_dataset
if num_flipped > num_indices:
return new_dataset
flipped_indice = random.sample(indices, num_flipped)
for i in flipped_indice:
t = targets[i]
flipped = torch.tensor(random.sample(class_list, 1)[0])
while t == flipped:
flipped = torch.tensor(random.sample(class_list, 1)[0])
targets[i] = flipped
else:
for i in indices:
if int(targets[i]) == int(target_label):
targets[i] = torch.tensor(target_changed_label)
new_dataset.targets = targets
return new_dataset
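# Minimal usage sketch (hypothetical stand-in dataset; labelFlipping() only needs a deepcopy-able
# object exposing a .targets tensor):
if __name__ == "__main__":
    class _ToyDataset:
        def __init__(self):
            self.targets = torch.randint(0, 10, (100,))
    random_flipped = labelFlipping(_ToyDataset(), indices=list(range(100)), poisoned_persent=0.2)
    targeted_flipped = labelFlipping(_ToyDataset(), indices=list(range(100)), targeted=True, target_label=4, target_changed_label=7)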
| 1,501 | Python | .py | 37 | 32.756757 | 112 | 0.642466 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,943 | modelpoison.py | enriquetomasmb_nebula/nebula/addons/attacks/poisoning/modelpoison.py | from collections import OrderedDict
import torch
from skimage.util import random_noise
def modelpoison(model: OrderedDict, poisoned_ratio, noise_type="gaussian"):
"""
Function to add random noise of various types to the model parameter.
"""
poisoned_model = OrderedDict()
    if not isinstance(noise_type, str):
noise_type = noise_type[0]
for layer in model:
bt = model[layer]
t = bt.detach().clone()
single_point = False
if len(t.shape) == 0:
t = t.view(-1)
single_point = True
# print(t)
if noise_type == "salt":
# Replaces random pixels with 1.
poisoned = torch.tensor(random_noise(t, mode=noise_type, amount=poisoned_ratio))
elif noise_type == "gaussian":
# Gaussian-distributed additive noise.
poisoned = torch.tensor(random_noise(t, mode=noise_type, mean=0, var=poisoned_ratio, clip=True))
elif noise_type == "s&p":
# Replaces random pixels with either 1 or low_val, where low_val is 0 for unsigned images or -1 for signed images.
poisoned = torch.tensor(random_noise(t, mode=noise_type, amount=poisoned_ratio))
else:
print("ERROR: poison attack type not supported.")
poisoned = t
if single_point:
poisoned = poisoned[0]
poisoned_model[layer] = poisoned
return poisoned_model
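# Minimal usage sketch (hypothetical two-tensor state_dict; in practice the OrderedDict returned
# by a PyTorch model's state_dict() would be passed in):
if __name__ == "__main__":
    demo_state = OrderedDict({"layer.weight": torch.rand(4, 4), "layer.bias": torch.rand(4)})
    poisoned_state = modelpoison(demo_state, poisoned_ratio=0.01, noise_type="gaussian")
    print(poisoned_state["layer.bias"])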
| 1,440 | Python | .py | 34 | 33.676471 | 126 | 0.625268 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,944 | factsheet.py | enriquetomasmb_nebula/nebula/addons/trustworthiness/factsheet.py | import json
import logging
import os
import glob
import shutil
import datetime
from json import JSONDecodeError
import pickle
import numpy as np
from numpy import NaN
import torch
import pandas as pd
import re
from nebula.core.models.mnist.mlp import MNISTTorchModelMLP, MNISTModelMLP
from nebula.core.models.mnist.cnn import MNISTTorchModelCNN, MNISTModelCNN
from nebula.core.models.mnist.mlp import SyscallTorchModelMLP, SyscallModelMLP
from nebula.core.models.mnist.cnn import CIFAR10TorchModelCNN, CIFAR10ModelCNN
from nebula.addons.trustworthiness.calculation import get_elapsed_time, get_bytes_models, get_bytes_sent_recv, get_avg_loss_accuracy, get_cv, get_clever_score, get_feature_importance_cv
from nebula.addons.trustworthiness.utils import count_class_samples, read_csv, check_field_filled, get_entropy
dirname = os.path.dirname(__file__)
logger = logging.getLogger(__name__)
class Factsheet:
def __init__(self):
"""
Manager class to populate the FactSheet
"""
self.factsheet_file_nm = "factsheet.json"
self.factsheet_template_file_nm = "factsheet_template.json"
def populate_factsheet_pre_train(self, data, scenario_name):
"""
Populates the factsheet with values before the training.
Args:
data (dict): Contains the data from the scenario.
scenario_name (string): The name of the scenario.
"""
factsheet_file = os.path.join(dirname, f"files/{scenario_name}/{self.factsheet_file_nm}")
factsheet_template = os.path.join(dirname, f"configs/{self.factsheet_template_file_nm}")
if not os.path.exists(factsheet_file):
shutil.copyfile(factsheet_template, factsheet_file)
with open(factsheet_file, "r+") as f:
factsheet = {}
try:
factsheet = json.load(f)
if data is not None:
logger.info("FactSheet: Populating factsheet with pre training metrics")
federation = data["federation"]
n_nodes = int(data["n_nodes"])
dataset = data["dataset"]
algorithm = data["model"]
aggregation_algorithm = data["agg_algorithm"]
n_rounds = int(data["rounds"])
attack = data["attacks"]
poisoned_node_percent = int(data["poisoned_node_percent"])
poisoned_sample_percent = int(data["poisoned_sample_percent"])
poisoned_noise_percent = int(data["poisoned_noise_percent"])
with_reputation = data["with_reputation"]
is_dynamic_topology = data["is_dynamic_topology"]
is_dynamic_aggregation = data["is_dynamic_aggregation"]
target_aggregation = data["target_aggregation"]
if attack != "No Attack" and with_reputation == True and is_dynamic_aggregation == True:
background = f"For the project setup, the most important aspects are the following: The federation architecture is {federation}, involving {n_nodes} clients, the dataset used is {dataset}, the learning algorithm is {algorithm}, the aggregation algorithm is {aggregation_algorithm} and the number of rounds is {n_rounds}. In addition, the type of attack used against the clients is {attack}, where the percentage of attacked nodes is {poisoned_node_percent}, the percentage of attacked samples of each node is {poisoned_sample_percent}, and the percent of poisoned noise is {poisoned_noise_percent}. A reputation-based defence with a dynamic aggregation based on the aggregation algorithm {target_aggregation} is used, and the trustworthiness of the project is desired."
elif attack != "No Attack" and with_reputation == True and is_dynamic_topology == True:
background = f"For the project setup, the most important aspects are the following: The federation architecture is {federation}, involving {n_nodes} clients, the dataset used is {dataset}, the learning algorithm is {algorithm}, the aggregation algorithm is {aggregation_algorithm} and the number of rounds is {n_rounds}. In addition, the type of attack used against the clients is {attack}, where the percentage of attacked nodes is {poisoned_node_percent}, the percentage of attacked samples of each node is {poisoned_sample_percent}, and the percent of poisoned noise is {poisoned_noise_percent}. A reputation-based defence with a dynamic topology is used, and the trustworthiness of the project is desired."
elif attack != "No Attack" and with_reputation == False:
background = f"For the project setup, the most important aspects are the following: The federation architecture is {federation}, involving {n_nodes} clients, the dataset used is {dataset}, the learning algorithm is {algorithm}, the aggregation algorithm is {aggregation_algorithm} and the number of rounds is {n_rounds}. In addition, the type of attack used against the clients is {attack}, where the percentage of attacked nodes is {poisoned_node_percent}, the percentage of attacked samples of each node is {poisoned_sample_percent}, and the percent of poisoned noise is {poisoned_noise_percent}. No defence mechanism is used, and the trustworthiness of the project is desired."
elif attack == "No Attack":
background = f"For the project setup, the most important aspects are the following: The federation architecture is {federation}, involving {n_nodes} clients, the dataset used is {dataset}, the learning algorithm is {algorithm}, the aggregation algorithm is {aggregation_algorithm} and the number of rounds is {n_rounds}. No attacks against clients are used, and the trustworthiness of the project is desired."
# Set project specifications
factsheet["project"]["overview"] = data["scenario_title"]
factsheet["project"]["purpose"] = data["scenario_description"]
factsheet["project"]["background"] = background
# Set data specifications
factsheet["data"]["provenance"] = data["dataset"]
factsheet["data"]["preprocessing"] = data["topology"]
# Set participants
factsheet["participants"]["client_num"] = data["n_nodes"] or ""
factsheet["participants"]["sample_client_rate"] = 1
factsheet["participants"]["client_selector"] = ""
# Set configuration
factsheet["configuration"]["aggregation_algorithm"] = data["agg_algorithm"] or ""
factsheet["configuration"]["training_model"] = data["model"] or ""
factsheet["configuration"]["personalization"] = False
factsheet["configuration"]["visualization"] = True
factsheet["configuration"]["total_round_num"] = n_rounds
if poisoned_noise_percent != 0:
factsheet["configuration"]["differential_privacy"] = True
factsheet["configuration"]["dp_epsilon"] = poisoned_noise_percent
else:
factsheet["configuration"]["differential_privacy"] = False
factsheet["configuration"]["dp_epsilon"] = ""
if dataset == "MNIST" and algorithm == "MLP":
model = MNISTModelMLP()
elif dataset == "MNIST" and algorithm == "CNN":
model = MNISTModelCNN()
elif dataset == "Syscall" and algorithm == "MLP":
model = SyscallModelMLP()
else:
model = CIFAR10ModelCNN()
factsheet["configuration"]["learning_rate"] = model.get_learning_rate()
factsheet["configuration"]["trainable_param_num"] = model.count_parameters()
factsheet["configuration"]["local_update_steps"] = 1
except JSONDecodeError as e:
logger.warning(f"{factsheet_file} is invalid")
logger.error(e)
f.seek(0)
f.truncate()
json.dump(factsheet, f, indent=4)
f.close()
def populate_factsheet_post_train(self, scenario):
"""
Populates the factsheet with values after the training.
Args:
scenario (object): The scenario object.
"""
scenario_name = scenario[0]
factsheet_file = os.path.join(dirname, f"files/{scenario_name}/{self.factsheet_file_nm}")
logger.info("FactSheet: Populating factsheet with post training metrics")
with open(factsheet_file, "r+") as f:
factsheet = {}
try:
factsheet = json.load(f)
dataset = factsheet["data"]["provenance"]
model = factsheet["configuration"]["training_model"]
actual_dir = os.getcwd()
files_dir = f"{actual_dir}/trustworthiness/files/{scenario_name}"
data_dir = f"{actual_dir}/trustworthiness/data/"
models_files = glob.glob(os.path.join(files_dir, "*final_model*"))
bytes_sent_files = glob.glob(os.path.join(files_dir, "*bytes_sent*"))
bytes_recv_files = glob.glob(os.path.join(files_dir, "*bytes_recv*"))
loss_files = glob.glob(os.path.join(files_dir, "*loss*"))
accuracy_files = glob.glob(os.path.join(files_dir, "*accuracy*"))
dataloaders_files = glob.glob(os.path.join(files_dir, "*train_loader*"))
test_dataloader_file = f"{files_dir}/participant_1_test_loader.pk"
train_model_file = f"{files_dir}/participant_1_train_model.pk"
emissions_file = os.path.join(files_dir, "emissions.csv")
# Entropy
i = 0
for file in dataloaders_files:
with open(file, "rb") as file:
dataloader = pickle.load(file)
get_entropy(i, scenario_name, dataloader)
i += 1
with open(f"{files_dir}/entropy.json", "r") as file:
entropy_distribution = json.load(file)
values = np.array(list(entropy_distribution.values()))
normalized_values = (values - np.min(values)) / (np.max(values) - np.min(values))
avg_entropy = np.mean(normalized_values)
factsheet["data"]["avg_entropy"] = avg_entropy
# Set performance data
result_avg_loss_accuracy = get_avg_loss_accuracy(loss_files, accuracy_files)
factsheet["performance"]["test_loss_avg"] = result_avg_loss_accuracy[0]
factsheet["performance"]["test_acc_avg"] = result_avg_loss_accuracy[1]
test_acc_cv = get_cv(std=result_avg_loss_accuracy[2], mean=result_avg_loss_accuracy[1])
factsheet["fairness"]["test_acc_cv"] = 1 if test_acc_cv > 1 else test_acc_cv
factsheet["system"]["avg_time_minutes"] = get_elapsed_time(scenario)
factsheet["system"]["avg_model_size"] = get_bytes_models(models_files)
result_bytes_sent_recv = get_bytes_sent_recv(bytes_sent_files, bytes_recv_files)
factsheet["system"]["total_upload_bytes"] = result_bytes_sent_recv[0]
factsheet["system"]["total_download_bytes"] = result_bytes_sent_recv[1]
factsheet["system"]["avg_upload_bytes"] = result_bytes_sent_recv[2]
factsheet["system"]["avg_download_bytes"] = result_bytes_sent_recv[3]
factsheet["fairness"]["selection_cv"] = 1
count_class_samples(scenario_name, dataloaders_files)
with open(f"{files_dir}/count_class.json", "r") as file:
class_distribution = json.load(file)
class_samples_sizes = [x for x in class_distribution.values()]
class_imbalance = get_cv(list=class_samples_sizes)
factsheet["fairness"]["class_imbalance"] = 1 if class_imbalance > 1 else class_imbalance
with open(train_model_file, "rb") as file:
lightning_model = pickle.load(file)
if dataset == "MNIST" and model == "MLP":
pytorch_model = MNISTTorchModelMLP()
elif dataset == "MNIST" and model == "CNN":
pytorch_model = MNISTTorchModelCNN()
elif dataset == "Syscall" and model == "MLP":
pytorch_model = SyscallTorchModelMLP()
else:
pytorch_model = CIFAR10TorchModelCNN()
pytorch_model.load_state_dict(lightning_model.state_dict())
with open(test_dataloader_file, "rb") as file:
test_dataloader = pickle.load(file)
test_sample = next(iter(test_dataloader))
lr = factsheet["configuration"]["learning_rate"]
value_clever = get_clever_score(pytorch_model, test_sample, 10, lr)
factsheet["performance"]["test_clever"] = 1 if value_clever > 1 else value_clever
feature_importance = get_feature_importance_cv(pytorch_model, test_sample)
factsheet["performance"]["test_feature_importance_cv"] = 1 if feature_importance > 1 else feature_importance
# Set emissions metrics
emissions = None if emissions_file is None else read_csv(emissions_file)
if emissions is not None:
logger.info("FactSheet: Populating emissions")
cpu_spez_df = pd.read_csv(os.path.join(data_dir, "CPU_benchmarks_v4.csv"), header=0)
emissions["CPU_model"] = emissions["CPU_model"].astype(str).str.replace(r"\([^)]*\)", "", regex=True)
emissions["CPU_model"] = emissions["CPU_model"].astype(str).str.replace(r" CPU", "", regex=True)
emissions["GPU_model"] = emissions["GPU_model"].astype(str).str.replace(r"[0-9] x ", "", regex=True)
emissions = pd.merge(emissions, cpu_spez_df[["cpuName", "powerPerf"]], left_on="CPU_model", right_on="cpuName", how="left")
gpu_spez_df = pd.read_csv(os.path.join(data_dir, "GPU_benchmarks_v7.csv"), header=0)
emissions = pd.merge(emissions, gpu_spez_df[["gpuName", "powerPerformance"]], left_on="GPU_model", right_on="gpuName", how="left")
emissions.drop("cpuName", axis=1, inplace=True)
emissions.drop("gpuName", axis=1, inplace=True)
emissions["powerPerf"] = emissions["powerPerf"].astype(float)
emissions["powerPerformance"] = emissions["powerPerformance"].astype(float)
client_emissions = emissions.loc[emissions["role"] == "client"]
client_avg_carbon_intensity = round(client_emissions["energy_grid"].mean(), 2)
factsheet["sustainability"]["avg_carbon_intensity_clients"] = check_field_filled(factsheet, ["sustainability", "avg_carbon_intensity_clients"], client_avg_carbon_intensity, "")
factsheet["sustainability"]["emissions_training"] = check_field_filled(factsheet, ["sustainability", "emissions_training"], client_emissions["emissions"].sum(), "")
factsheet["participants"]["avg_dataset_size"] = check_field_filled(factsheet, ["participants", "avg_dataset_size"], client_emissions["sample_size"].mean(), "")
server_emissions = emissions.loc[emissions["role"] == "server"]
server_avg_carbon_intensity = round(server_emissions["energy_grid"].mean(), 2)
factsheet["sustainability"]["avg_carbon_intensity_server"] = check_field_filled(factsheet, ["sustainability", "avg_carbon_intensity_server"], server_avg_carbon_intensity, "")
factsheet["sustainability"]["emissions_aggregation"] = check_field_filled(factsheet, ["sustainability", "emissions_aggregation"], server_emissions["emissions"].sum(), "")
GPU_powerperf = (server_emissions.loc[server_emissions["GPU_used"] == True])["powerPerformance"]
CPU_powerperf = (server_emissions.loc[server_emissions["CPU_used"] == True])["powerPerf"]
server_power_performance = round(pd.concat([GPU_powerperf, CPU_powerperf]).mean(), 2)
factsheet["sustainability"]["avg_power_performance_server"] = check_field_filled(factsheet, ["sustainability", "avg_power_performance_server"], server_power_performance, "")
GPU_powerperf = (client_emissions.loc[client_emissions["GPU_used"] == True])["powerPerformance"]
CPU_powerperf = (client_emissions.loc[client_emissions["CPU_used"] == True])["powerPerf"]
clients_power_performance = round(pd.concat([GPU_powerperf, CPU_powerperf]).mean(), 2)
factsheet["sustainability"]["avg_power_performance_clients"] = clients_power_performance
factsheet["sustainability"]["emissions_communication_uplink"] = check_field_filled(factsheet, ["sustainability", "emissions_communication_uplink"], factsheet["system"]["total_upload_bytes"] * 2.24e-10 * factsheet["sustainability"]["avg_carbon_intensity_clients"], "")
factsheet["sustainability"]["emissions_communication_downlink"] = check_field_filled(factsheet, ["sustainability", "emissions_communication_downlink"], factsheet["system"]["total_download_bytes"] * 2.24e-10 * factsheet["sustainability"]["avg_carbon_intensity_server"], "")
except JSONDecodeError as e:
logger.warning(f"{factsheet_file} is invalid")
logger.error(e)
f.seek(0)
f.truncate()
json.dump(factsheet, f, indent=4)
f.close()
| 18,192 | Python | .py | 228 | 63.280702 | 793 | 0.617598 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,945 | pillar.py | enriquetomasmb_nebula/nebula/addons/trustworthiness/pillar.py | import logging
from nebula.addons.trustworthiness import calculation
from nebula.addons.trustworthiness.utils import get_input_value
logger = logging.getLogger(__name__)
class TrustPillar:
"""
Class to represent a trust pillar.
Args:
name (string): Name of the pillar.
metrics (dict): Metric definitions for the pillar.
input_docs (dict): Input documents.
use_weights (bool): True to turn on the weights in the metric config file.
"""
def __init__(self, name, metrics, input_docs, use_weights=False):
self.name = name
self.input_docs = input_docs
self.metrics = metrics
self.result = []
self.use_weights = use_weights
def evaluate(self):
"""
Evaluate the trust score for the pillar.
Returns:
float: Score of [0, 1].
"""
score = 0
avg_weight = 1 / len(self.metrics)
for key, value in self.metrics.items():
weight = value.get("weight", avg_weight) if self.use_weights else avg_weight
score += weight * self.get_notion_score(key, value.get("metrics"))
score = round(score, 2)
return score, {self.name: {"score": score, "notions": self.result}}
def get_notion_score(self, name, metrics):
"""
Evaluate the trust score for the notion.
Args:
name (string): Name of the notion.
metrics (list): Metrics definitions of the notion.
Returns:
float: Score of [0, 1].
"""
notion_score = 0
avg_weight = 1 / len(metrics)
metrics_result = []
for key, value in metrics.items():
metric_score = self.get_metric_score(metrics_result, key, value)
weight = value.get("weight", avg_weight) if self.use_weights else avg_weight
notion_score += weight * float(metric_score)
self.result.append({name: {"score": notion_score, "metrics": metrics_result}})
return notion_score
def get_metric_score(self, result, name, metric):
"""
Evaluate the trust score for the metric.
Args:
            result (list): The list collecting the per-metric results.
            name (string): Name of the metric.
            metric (dict): The metric definition.
Returns:
float: Score of [0, 1].
"""
score = 0
try:
input_value = get_input_value(self.input_docs, metric.get("inputs"), metric.get("operation"))
score_type = metric.get("type")
if input_value is None:
logger.warning(f"{name} input value is null")
else:
if score_type == "true_score":
score = calculation.get_true_score(input_value, metric.get("direction"))
elif score_type == "score_mapping":
score = calculation.get_mapped_score(input_value, metric.get("score_map"))
elif score_type == "ranges":
score = calculation.get_range_score(input_value, metric.get("ranges"), metric.get("direction"))
elif score_type == "score_map_value":
score = calculation.get_map_value_score(input_value, metric.get("score_map"))
elif score_type == "scaled_score":
score = calculation.get_scaled_score(input_value, metric.get("scale"), metric.get("direction"))
elif score_type == "property_check":
score = 0 if input_value is None else input_value
else:
logger.warning(f"The score type {score_type} is not yet implemented.")
except KeyError:
logger.warning(f"Null input for {name} metric")
score = round(score, 2)
result.append({name: {"score": score}})
return score
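# Minimal usage sketch with an inline metric definition (all names below are placeholders; the
# real pillar/notion/metric definitions live in configs/eval_metrics.json and the inputs come
# from the generated factsheet):
if __name__ == "__main__":
    demo_metrics = {
        "demo_notion": {
            "metrics": {
                "demo_metric": {
                    "inputs": [{"source": "factsheet", "field_path": "performance/test_acc_avg"}],
                    "operation": "get_value",
                    "type": "true_score",
                    "direction": "asc",
                }
            }
        }
    }
    demo_inputs = {"factsheet": {"performance": {"test_acc_avg": 0.87}}}
    demo_pillar = TrustPillar("demo_pillar", demo_metrics, demo_inputs)
    print(demo_pillar.evaluate())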
| 3,859 | Python | .py | 86 | 33.918605 | 115 | 0.584333 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,946 | calculation.py | enriquetomasmb_nebula/nebula/addons/trustworthiness/calculation.py | import logging
import math
import numbers
import os.path
from datetime import datetime
from math import e
import numpy as np
import shap
import torch.nn
from art.estimators.classification import PyTorchClassifier
from art.metrics import clever_u
from scipy.stats import variation
from torch import nn, optim
import statistics
from codecarbon import EmissionsTracker
import pandas as pd
from os.path import exists
dirname = os.path.dirname(__file__)
logger = logging.getLogger(__name__)
R_L1 = 40
R_L2 = 2
R_LI = 0.1
def get_mapped_score(score_key, score_map):
"""
Finds the score by the score_key in the score_map.
Args:
score_key (string): The key to look up in the score_map.
score_map (dict): The score map defined in the eval_metrics.json file.
Returns:
float: The normalized score of [0, 1].
"""
score = 0
if score_map is None:
logger.warning("Score map is missing")
else:
keys = [key for key, value in score_map.items()]
scores = [value for key, value in score_map.items()]
normalized_scores = get_normalized_scores(scores)
normalized_score_map = dict(zip(keys, normalized_scores))
score = normalized_score_map.get(score_key, np.nan)
return score
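# Worked example: with score_map {"low": 0, "medium": 1, "high": 2} the values normalize to
# [0.0, 0.5, 1.0], so get_mapped_score("medium", score_map) returns 0.5 and an unknown key
# returns NaN.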
def get_normalized_scores(scores):
"""
Calculates the normalized scores of a list.
Args:
scores (list): The values that will be normalized.
Returns:
list: The normalized list.
"""
normalized = [(x - np.min(scores)) / (np.max(scores) - np.min(scores)) for x in scores]
return normalized
def get_range_score(value, ranges, direction="asc"):
"""
Maps the value to a range and gets the score by the range and direction.
Args:
value (int): The input score.
ranges (list): The ranges defined.
direction (string): Asc means the higher the range the higher the score, desc means otherwise.
Returns:
float: The normalized score of [0, 1].
"""
if not (type(value) == int or type(value) == float):
logger.warning("Input value is not a number")
logger.warning(f"{value}")
return 0
else:
score = 0
if ranges is None:
logger.warning("Score ranges are missing")
else:
total_bins = len(ranges) + 1
bin = np.digitize(value, ranges, right=True)
score = 1 - (bin / total_bins) if direction == "desc" else bin / total_bins
return score
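# Worked example: with ranges [10, 20, 30] there are 4 bins and np.digitize(35, ranges, right=True)
# returns 3, so get_range_score(35, [10, 20, 30], "asc") = 3/4 = 0.75, while direction "desc"
# gives 1 - 3/4 = 0.25.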
def get_map_value_score(score_key, score_map):
"""
Finds the score by the score_key in the score_map and returns the value.
Args:
score_key (string): The key to look up in the score_map.
score_map (dict): The score map defined in the eval_metrics.json file.
Returns:
float: The score obtained in the score_map.
"""
score = 0
if score_map is None:
logger.warning("Score map is missing")
else:
score = score_map[score_key]
return score
def get_true_score(value, direction):
"""
    Returns 1 for True, 0 for False, 1 - value if direction is 'desc', and the value itself otherwise.
Args:
value (int): The input score.
direction (string): Asc means the higher the range the higher the score, desc means otherwise.
Returns:
float: The score obtained.
"""
if value is True:
return 1
elif value is False:
return 0
else:
if not (type(value) == int or type(value) == float):
logger.warning("Input value is not a number")
logger.warning(f"{value}.")
return 0
else:
if direction == "desc":
return 1 - value
else:
return value
def get_scaled_score(value, scale: list, direction: str):
"""
Maps a score of a specific scale into the scale between zero and one.
Args:
value (int or float): The raw value of the metric.
scale (list): List containing the minimum and maximum value the value can fall in between.
Returns:
float: The normalized score of [0, 1].
"""
score = 0
try:
value_min, value_max = scale[0], scale[1]
except Exception as e:
logger.warning("Score minimum or score maximum is missing. The minimum has been set to 0 and the maximum to 1")
value_min, value_max = 0, 1
if not value:
logger.warning("Score value is missing. Set value to zero")
else:
low, high = 0, 1
if value >= value_max:
score = 1
elif value <= value_min:
score = 0
else:
diff = value_max - value_min
diffScale = high - low
score = (float(value) - value_min) * (float(diffScale) / diff) + low
if direction == "desc":
score = high - score
return score
def get_value(value):
"""
Get the value of a metric.
Args:
value (float): The value of the metric.
Returns:
float: The value of the metric.
"""
return value
def check_properties(*args):
"""
Check if all the arguments have values.
Args:
args (list): All the arguments.
Returns:
float: The mean of arguments that have values.
"""
result = map(lambda x: x is not None and x != "", args)
return np.mean(list(result))
def get_cv(list=None, std=None, mean=None):
"""
Get the coefficient of variation.
Args:
list (list): List in which the coefficient of variation will be calculated.
std (float): Standard deviation of a list.
mean (float): Mean of a list.
Returns:
float: The coefficient of variation calculated.
"""
if std is not None and mean is not None:
return std / mean
if list is not None:
return np.std(list) / np.mean(list)
return 0
def get_global_privacy_risk(dp, epsilon, n):
"""
Calculates the global privacy risk by epsilon and the number of clients.
Args:
dp (bool): Indicates if differential privacy is used or not.
epsilon (int): The epsilon value.
n (int): The number of clients in the scenario.
Returns:
float: The global privacy risk.
"""
if dp is True and isinstance(epsilon, numbers.Number):
return 1 / (1 + (n - 1) * math.pow(e, -epsilon))
else:
return 1
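# Worked example: with differential privacy enabled, epsilon = 1 and n = 10 clients the risk is
# 1 / (1 + 9 * e**-1), which is about 0.23; the value tends to 1/n for very small epsilon and
# to 1 for large epsilon (or when differential privacy is disabled).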
def get_elapsed_time(scenario):
"""
Calculates the elapsed time during the execution of the scenario.
Args:
scenario (object): Scenario required.
Returns:
float: The elapsed time.
"""
start_time = scenario[1]
end_time = scenario[2]
start_date = datetime.strptime(start_time, "%d/%m/%Y %H:%M:%S")
end_date = datetime.strptime(end_time, "%d/%m/%Y %H:%M:%S")
elapsed_time = (end_date - start_date).total_seconds() / 60
return elapsed_time
def get_bytes_models(models_files):
"""
Calculates the mean bytes of the final models of the nodes.
Args:
models_files (list): List of final models.
Returns:
float: The mean bytes of the models.
"""
total_models_size = 0
number_models = len(models_files)
for file in models_files:
model_size = os.path.getsize(file)
total_models_size += model_size
avg_model_size = total_models_size / number_models
return avg_model_size
def get_bytes_sent_recv(bytes_sent_files, bytes_recv_files):
"""
Calculates the mean bytes sent and received of the nodes.
Args:
bytes_sent_files (list): Files that contain the bytes sent of the nodes.
bytes_recv_files (list): Files that contain the bytes received of the nodes.
Returns:
        4-tuple: The total bytes sent, the total bytes received, the mean bytes sent and the mean bytes received of the nodes.
"""
total_upload_bytes = 0
total_download_bytes = 0
number_files = len(bytes_sent_files)
for file_bytes_sent, file_bytes_recv in zip(bytes_sent_files, bytes_recv_files):
with open(file_bytes_sent, "r") as f:
bytes_sent = f.read()
with open(file_bytes_recv, "r") as f:
bytes_recv = f.read()
total_upload_bytes += int(bytes_sent)
total_download_bytes += int(bytes_recv)
avg_upload_bytes = total_upload_bytes / number_files
avg_download_bytes = total_download_bytes / number_files
return total_upload_bytes, total_download_bytes, avg_upload_bytes, avg_download_bytes
def get_avg_loss_accuracy(loss_files, accuracy_files):
"""
Calculates the mean accuracy and loss models of the nodes.
Args:
loss_files (list): Files that contain the loss of the models of the nodes.
        accuracy_files (list): Files that contain the accuracies of the models of the nodes.
Returns:
        3-tuple: The mean loss of the models, the mean accuracy of the models and the standard deviation of the accuracies of the models.
"""
total_accuracy = 0
total_loss = 0
number_files = len(loss_files)
accuracies = []
for file_loss, file_accuracy in zip(loss_files, accuracy_files):
with open(file_loss, "r") as f:
loss = f.read()
with open(file_accuracy, "r") as f:
accuracy = f.read()
total_loss += float(loss)
total_accuracy += float(accuracy)
accuracies.append(float(accuracy))
avg_loss = total_loss / number_files
avg_accuracy = total_accuracy / number_files
std_accuracy = statistics.stdev(accuracies)
return avg_loss, avg_accuracy, std_accuracy
def get_feature_importance_cv(model, test_sample):
"""
Calculates the coefficient of variation of the feature importance.
Args:
model (object): The model.
test_sample (object): One test sample to calculate the feature importance.
Returns:
float: The coefficient of variation of the feature importance.
"""
try:
cv = 0
batch_size = 10
device = "cpu"
if isinstance(model, torch.nn.Module):
batched_data, _ = test_sample
n = batch_size
m = math.floor(0.8 * n)
background = batched_data[:m].to(device)
test_data = batched_data[m:n].to(device)
e = shap.DeepExplainer(model, background)
shap_values = e.shap_values(test_data)
if shap_values is not None and len(shap_values) > 0:
sums = np.array([shap_values[i].sum() for i in range(len(shap_values))])
abs_sums = np.absolute(sums)
cv = variation(abs_sums)
except Exception as e:
logger.warning("Could not compute feature importance CV with shap")
cv = 1
if math.isnan(cv):
cv = 1
return cv
def get_clever_score(model, test_sample, nb_classes, learning_rate):
"""
Calculates the CLEVER score.
Args:
model (object): The model.
test_sample (object): One test sample to calculate the CLEVER score.
nb_classes (int): The nb_classes of the model.
learning_rate (float): The learning rate of the model.
Returns:
float: The CLEVER score.
"""
images, _ = test_sample
background = images[-1]
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), learning_rate)
# Create the ART classifier
classifier = PyTorchClassifier(model=model, loss=criterion, optimizer=optimizer, input_shape=(1, 28, 28), nb_classes=nb_classes)
score_untargeted = clever_u(classifier, background.numpy(), 10, 5, R_L2, norm=2, pool_factor=3, verbose=False)
return score_untargeted
def stop_emissions_tracking_and_save(tracker: EmissionsTracker, outdir: str, emissions_file: str, role: str, workload: str, sample_size: int = 0):
"""
Stops emissions tracking object from CodeCarbon and saves relevant information to emissions.csv file.
Args:
tracker (object): The emissions tracker object holding information.
outdir (str): The path of the output directory of the experiment.
emissions_file (str): The path to the emissions file.
role (str): Either client or server depending on the role.
workload (str): Either aggregation or training depending on the workload.
sample_size (int): The number of samples used for training, if aggregation 0.
"""
tracker.stop()
emissions_file = os.path.join(outdir, emissions_file)
if exists(emissions_file):
df = pd.read_csv(emissions_file)
else:
df = pd.DataFrame(columns=["role", "energy_grid", "emissions", "workload", "CPU_model", "GPU_model"])
try:
energy_grid = (tracker.final_emissions_data.emissions / tracker.final_emissions_data.energy_consumed) * 1000
df = pd.concat(
[
df,
pd.DataFrame(
{
"role": role,
"energy_grid": [energy_grid],
"emissions": [tracker.final_emissions_data.emissions],
"workload": workload,
"CPU_model": tracker.final_emissions_data.cpu_model if tracker.final_emissions_data.cpu_model else "None",
"GPU_model": tracker.final_emissions_data.gpu_model if tracker.final_emissions_data.gpu_model else "None",
"CPU_used": True if tracker.final_emissions_data.cpu_energy else False,
"GPU_used": True if tracker.final_emissions_data.gpu_energy else False,
"energy_consumed": tracker.final_emissions_data.energy_consumed,
"sample_size": sample_size,
}
),
],
ignore_index=True,
)
df.to_csv(emissions_file, encoding="utf-8", index=False)
except Exception as e:
logger.warning(e)
| 13,965 | Python | .py | 358 | 31.307263 | 146 | 0.634484 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,947 | utils.py | enriquetomasmb_nebula/nebula/addons/trustworthiness/utils.py | import ast
import json
import logging
import os
from json import JSONDecodeError
import pickle
import torch
import yaml
from dotmap import DotMap
from scipy.stats import entropy
from torch.utils.data import DataLoader
from hashids import Hashids
import pandas as pd
from os.path import exists
import math
from nebula.addons.trustworthiness import calculation
hashids = Hashids()
logger = logging.getLogger(__name__)
dirname = os.path.dirname(__file__)
def count_class_samples(scenario_name, dataloaders_files):
"""
Counts the number of samples by class.
Args:
scenario_name (string): Name of the scenario.
dataloaders_files (list): Files that contain the dataloaders.
"""
result = {}
dataloaders = []
for file in dataloaders_files:
with open(file, "rb") as f:
dataloader = pickle.load(f)
dataloaders.append(dataloader)
for dataloader in dataloaders:
for batch, labels in dataloader:
for b, label in zip(batch, labels):
l = hashids.encode(label.item())
if l in result:
result[l] += 1
else:
result[l] = 1
name_file = f"{dirname}/files/{scenario_name}/count_class.json"
with open(name_file, "w") as f:
json.dump(result, f)
def get_entropy(client_id, scenario_name, dataloader):
"""
Get the entropy of each client in the scenario.
Args:
client_id (int): The client id.
scenario_name (string): Name of the scenario.
        dataloader (DataLoader): The dataloader of the client.
"""
result = {}
client_entropy = {}
name_file = f"{dirname}/files/{scenario_name}/entropy.json"
if os.path.exists(name_file):
with open(name_file, "r") as f:
client_entropy = json.load(f)
client_id_hash = hashids.encode(client_id)
for batch, labels in dataloader:
for b, label in zip(batch, labels):
l = hashids.encode(label.item())
if l in result:
result[l] += 1
else:
result[l] = 1
n = len(dataloader)
entropy_value = entropy([x / n for x in result.values()], base=2)
client_entropy[client_id_hash] = entropy_value
with open(name_file, "w") as f:
json.dump(client_entropy, f)
def read_csv(filename):
"""
Read a CSV file.
Args:
filename (string): Name of the file.
Returns:
        pandas.DataFrame: The CSV file read into a DataFrame, or None if the file does not exist.
"""
if exists(filename):
return pd.read_csv(filename)
def check_field_filled(factsheet_dict, factsheet_path, value, empty=""):
"""
Check if the field in the factsheet file is filled or not.
Args:
        factsheet_dict (dict): The factsheet dict.
factsheet_path (list): The factsheet field to check.
value (float): The value to add in the field.
empty (string): If the value could not be appended, the empty string is returned.
Returns:
        float: The value added to the factsheet, or empty if the value could not be appended.
"""
if factsheet_dict[factsheet_path[0]][factsheet_path[1]]:
return factsheet_dict[factsheet_path[0]][factsheet_path[1]]
elif value != "" and value != "nan":
if type(value) != str and type(value) != list:
if math.isnan(value):
return 0
else:
return value
else:
return value
else:
return empty
def get_input_value(input_docs, inputs, operation):
"""
Gets the input value from input document and apply the metric operation on the value.
Args:
inputs_docs (map): The input document map.
inputs (list): All the inputs.
operation (string): The metric operation.
Returns:
float: The metric value
"""
input_value = None
args = []
for i in inputs:
source = i.get("source", "")
field = i.get("field_path", "")
input_doc = input_docs.get(source, None)
if input_doc is None:
logger.warning(f"{source} is null")
else:
input = get_value_from_path(input_doc, field)
args.append(input)
try:
operationFn = getattr(calculation, operation)
input_value = operationFn(*args)
except TypeError as e:
logger.warning(f"{operation} is not valid")
return input_value
def get_value_from_path(input_doc, path):
"""
Gets the input value from input document by path.
Args:
inputs_doc (map): The input document map.
path (string): The field name of the input value of interest.
Returns:
float: The input value from the input document
"""
d = input_doc
for nested_key in path.split("/"):
temp = d.get(nested_key)
if isinstance(temp, dict):
d = d.get(nested_key)
else:
return temp
return None
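# Example: get_value_from_path({"performance": {"test_acc_avg": 0.9}}, "performance/test_acc_avg")
# returns 0.9, while a path whose intermediate key is missing returns None.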
def write_results_json(out_file, dict):
"""
Writes the result to JSON.
Args:
out_file (string): The output file.
        dict (dict): The object to be written into JSON.
"""
with open(out_file, "a") as f:
json.dump(dict, f, indent=4)
def save_results_csv(scenario_name: str, id: int, bytes_sent: int, bytes_recv: int, accuracy: float, loss: float, finish: bool):
outdir = f"{dirname}/files/{scenario_name}"
filename = "data_results.csv"
data_results_file = os.path.join(outdir, filename)
if exists(data_results_file):
df = pd.read_csv(data_results_file)
else:
        # Create a DataFrame with the specified columns
df = pd.DataFrame(columns=["id", "bytes_sent", "bytes_recv", "accuracy", "loss", "finish"])
try:
if id not in df["id"].values:
            # If the node id is not in the results yet, add a new entry for it
            df = pd.concat([df, pd.DataFrame([{"id": id, "bytes_sent": None, "bytes_recv": None, "accuracy": None, "loss": None, "finish": False}])], ignore_index=True)
df.to_csv(data_results_file, encoding="utf-8", index=False)
else:
if bytes_sent is not None:
df.loc[df["id"] == id, "bytes_sent"] = bytes_sent
if bytes_recv is not None:
df.loc[df["id"] == id, "bytes_recv"] = bytes_recv
if accuracy is not None:
df.loc[df["id"] == id, "accuracy"] = accuracy
if loss is not None:
df.loc[df["id"] == id, "loss"] = loss
if finish:
df.loc[df["id"] == id, "finish"] = True
df.to_csv(data_results_file, encoding="utf-8", index=False)
except Exception as e:
logger.warning(e)
| 6,803 | Python | .py | 186 | 28.806452 | 166 | 0.612245 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,948 | metric.py | enriquetomasmb_nebula/nebula/addons/trustworthiness/metric.py | import json
import logging
import math
import os
import shutil
from json import JSONDecodeError
import numpy as np
import pandas as pd
from numpy import NaN
from tabulate import tabulate
from nebula.addons.trustworthiness.pillar import TrustPillar
from nebula.addons.trustworthiness.utils import write_results_json
dirname = os.path.dirname(__file__)
logger = logging.getLogger(__name__)
class TrustMetricManager:
"""
Manager class to help store the output directory and handle calls from the FL framework.
"""
def __init__(self):
self.factsheet_file_nm = "factsheet.json"
self.eval_metrics_file_nm = "eval_metrics.json"
self.nebula_trust_results_nm = "nebula_trust_results.json"
def evaluate(self, scenario, weights, use_weights=False):
"""
Evaluates the trustworthiness score.
Args:
scenario (object): The scenario in whith the trustworthiness will be calculated.
weights (dict): The desired weghts of the pillars.
use_weights (bool): True to turn on the weights in the metric config file, default to False.
"""
# Get scenario name
scenario_name = scenario[0]
factsheet_file = os.path.join(dirname, f"files/{scenario_name}/{self.factsheet_file_nm}")
metrics_cfg_file = os.path.join(dirname, f"configs/{self.eval_metrics_file_nm}")
results_file = os.path.join(dirname, f"files/{scenario_name}/{self.nebula_trust_results_nm}")
if not os.path.exists(factsheet_file):
logger.error(f"{factsheet_file} is missing! Please check documentation.")
return
if not os.path.exists(metrics_cfg_file):
logger.error(f"{metrics_cfg_file} is missing! Please check documentation.")
return
with open(factsheet_file, "r") as f, open(metrics_cfg_file, "r") as m:
factsheet = json.load(f)
metrics_cfg = json.load(m)
metrics = metrics_cfg.items()
input_docs = {"factsheet": factsheet}
result_json = {"trust_score": 0, "pillars": []}
final_score = 0
result_print = []
for key, value in metrics:
pillar = TrustPillar(key, value, input_docs, use_weights)
score, result = pillar.evaluate()
weight = weights.get(key)
final_score += weight * score
result_print.append([key, score])
result_json["pillars"].append(result)
final_score = round(final_score, 2)
result_json["trust_score"] = final_score
write_results_json(results_file, result_json)
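# Minimal usage sketch (the weight keys below are placeholders and must match the pillar names
# defined in configs/eval_metrics.json; the factsheet for the scenario must already exist):
if __name__ == "__main__":
    demo_scenario = ("demo_scenario", "01/01/2024 10:00:00", "01/01/2024 10:30:00")
    demo_weights = {"pillar_a": 0.5, "pillar_b": 0.5}
    TrustMetricManager().evaluate(demo_scenario, demo_weights, use_weights=True)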
| 2,679 | Python | .py | 59 | 36.525424 | 104 | 0.644035 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,949 | blockchain_deployer.py | enriquetomasmb_nebula/nebula/addons/blockchain/blockchain_deployer.py | import os.path
import random
import shutil
import textwrap
import json
from datetime import datetime
from typing import Tuple
from web3 import Web3
from eth_keys import keys
w3 = Web3()
class BlockchainDeployer:
"""
Creates files (docker-compose.yaml and genesis.json) for deploying blockchain network
"""
def __init__(self, n_validator=3, config_dir=".", input_dir="."):
# root dir of blockchain folder
self.__input_dir = input_dir
# config folder for storing generated files for deployment
self.__config_dir = config_dir
# random but static id of boot node to be assigned to all other nodes
self.__boot_id = None
# ip address of boot node (needs to be static)
self.__boot_ip = "172.25.0.101"
# ip address of non-validator node (needs to be static)
self.__rpc_ip = "172.25.0.104"
# ip address of oracle (needs to be static)
self.__oracle_ip = "172.25.0.105"
# temporary yaml parameter to store config before dump
self.__yaml = str()
# list of reserved addresses which need to be excluded in random address generation
self.__reserved_addresses = set()
# load original genesis dict
self.__genesis = self.__load_genesis()
# create blockchain directory in scenario's config directory
self.__setup_dir()
# add a boot node to the yaml file
self.__add_boot_node()
# add n validator nodes to the genesis.json and yaml file
self.__add_validator(n_validator)
# add non-validator node to the yaml file
self.__add_rpc()
# add oracle node to the genesis.json and yaml file
self.__add_oracle()
# dump config files into scenario's config directory
self.__export_config()
def __setup_dir(self) -> None:
if not os.path.exists(self.__config_dir):
os.makedirs(self.__config_dir, exist_ok=True)
def __get_unreserved_address(self) -> Tuple[int, int]:
"""
Computes a randomized port and last 8 bits of an ip address, where both are not yet used
        Returns: Randomized and unreserved last 8 bits of the IP and the port
"""
# extract reserved ports and ip addresses
reserved_ips = [address[0] for address in self.__reserved_addresses]
reserved_ports = [address[1] for address in self.__reserved_addresses]
# get randomized ip and port in range still unreserved
ip = random.choice([number for number in range(10, 254) if number not in reserved_ips])
port = random.choice([number for number in range(30310, 30360) if number not in reserved_ports])
# add network address to list of reserved addresses
self.__reserved_addresses.add((ip, port))
return ip, port
def __copy_dir(self, source_path) -> None:
"""
Copy blockchain folder with current files such as chaincode to config folder
Args:
source_path: Path of dir to copy
Returns: None
"""
curr_path = os.path.dirname(os.path.abspath(__file__))
if not os.path.exists(self.__config_dir):
os.makedirs(self.__config_dir, exist_ok=True)
target_dir = os.path.join(self.__config_dir, source_path)
source_dir = os.path.join(curr_path, source_path)
shutil.copytree(str(source_dir), target_dir, dirs_exist_ok=True)
@staticmethod
def __load_genesis() -> dict[str, int | str | dict]:
"""
Load original genesis config
Returns: Genesis json dict
"""
return {
"config": {
"chainId": 19265019, # unique id not used by any public Ethereum network
# block number at which the defined EIP hard fork policies are applied
"homesteadBlock": 0,
"eip150Block": 0,
"eip155Block": 0,
"eip158Block": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 0,
"petersburgBlock": 0,
"istanbulBlock": 0,
"muirGlacierBlock": 0,
"berlinBlock": 0,
# Proof-of-Authority settings
"clique": {"period": 1, "epoch": 10000}, # block time (time in seconds between two blocks) # number of blocks after reset the pending votes
},
# unique continuous id of transactions used by PoA
"nonce": "0x0",
# UNIX timestamp of block creation
"timestamp": "0x5a8efd25",
# strictly formated string containing all public wallet addresses of all validators (PoA)
# will be replaced by public addresses of randomly generated validator node
"extraData": "0x0000000000000000000000000000000000000000000000000000000000000000187c1c14c75bA185A59c621Fbe5dda26D488852DF20C144e8aE3e1aCF7071C4883B759D1B428e7930000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
# maximum gas (computational cost) per transaction
"gasLimit": "9000000000000", # "8000000" is default for Ethereum but too low for heavy load
# difficulty for PoW
"difficulty": "0x1",
# root hash of block
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
# validator of genesis block
"coinbase": "0x0000000000000000000000000000000000000000",
# prefunded public wallet addresses (Oracle)
"alloc": {
# will be replaced by Oracle's randomized address
"0x61DE01FcD560da4D6e05E58bCD34C8Dc92CE36D1": {"balance": "0x200000000000000000000000000000000000000000000000000000000000000"}
},
# block number of genesis block
"number": "0x0",
# gas used to validate genesis block
"gasUsed": "0x0",
# hash of parent block (0x0 since first block)
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
}
def __add_boot_node(self) -> None:
"""
Adds boot node to docker-compose.yaml
Returns: None
"""
# create random private key and create account from it
acc = w3.eth.account.create()
# store id of boot node to be inserted into all other nodes
self.__boot_id = str(keys.PrivateKey(acc.key).public_key)[2:]
# add service to yaml string
self.__yaml += textwrap.dedent(
f"""
geth-bootnode:
hostname: geth-bootnode
environment:
- nodekeyhex={w3.to_hex(acc.key)[2:]}
build:
dockerfile: {self.__input_dir}/geth/boot.dockerfile
container_name: boot
networks:
chainnet:
ipv4_address: {self.__boot_ip}
"""
)
def __add_validator(self, cnt) -> None:
"""
Randomly generates and adds number(cnt) of validator nodes to yaml and genesis.json
Args:
            cnt: number of validator nodes to create
Returns: None
"""
validator_addresses = list()
for id in range(cnt):
# create random private key and create account from it
acc = w3.eth.account.create()
validator_addresses.append(acc.address[2:])
# get random network address
ip, port = self.__get_unreserved_address()
self.__yaml += textwrap.dedent(
f"""
geth-validator-{id}:
hostname: geth-validator-{id}
depends_on:
- geth-bootnode
environment:
- address={acc.address}
- bootnodeId={self.__boot_id}
- bootnodeIp={self.__boot_ip}
- port={port}
build:
dockerfile: {self.__input_dir}/geth/validator.dockerfile
args:
privatekey: {w3.to_hex(acc.key)[2:]}
password: {w3.to_hex(w3.eth.account.create().key)}
container_name: validator_{id}
networks:
chainnet:
ipv4_address: 172.25.0.{ip}
"""
)
# create specific Ethereum extra data string for PoA with all public addresses of validators
extra_data = "0x" + "0" * 64 + "".join([a for a in validator_addresses]) + 65 * "0" + 65 * "0"
self.__genesis["extraData"] = extra_data
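    # Layout of the Clique extraData string assembled above: 32 vanity bytes (64 hex characters),
    # then the 20-byte address (40 hex characters) of each validator, then a 65-byte placeholder
    # (130 hex characters) for the proposer seal.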
def __add_oracle(self) -> None:
"""
Adds Oracle node to yaml and genesis.json
Returns: None
"""
# create random private key and create account from it
acc = w3.eth.account.create()
# prefund oracle by allocating all funds to its public wallet address
self.__genesis["alloc"] = {acc.address: {"balance": "0x200000000000000000000000000000000000000000000000000000000000000"}}
self.__yaml += textwrap.dedent(
f"""
oracle:
hostname: oracle
depends_on:
- geth-rpc
- geth-bootnode
environment:
- PRIVATE_KEY={w3.to_hex(acc.key)[2:]}
- RPC_IP={self.__rpc_ip}
build:
dockerfile: {self.__input_dir}/geth/oracle.dockerfile
context: {self.__input_dir}
ports:
- 8081:8081
container_name: oracle
networks:
chainnet:
ipv4_address: {self.__oracle_ip}
"""
)
def __add_rpc(self):
"""
        Adds non-validator (RPC) node to yaml
Returns: None
"""
# create random private key and create account from it
acc = w3.eth.account.create()
self.__yaml += textwrap.dedent(
f"""
geth-rpc:
hostname: geth-rpc
depends_on:
- geth-bootnode
environment:
- address={acc.address}
- bootnodeId={self.__boot_id}
- bootnodeIp={self.__boot_ip}
build:
dockerfile: {self.__input_dir}/geth/rpc.dockerfile
ports:
- 8545:8545
container_name: rpc
networks:
chainnet:
ipv4_address: {self.__rpc_ip}
"""
)
def __add_network(self) -> None:
"""
Adds network config to docker-compose.yaml to create a private network for docker compose
Returns: None
"""
self.__yaml += textwrap.dedent(
f"""
networks:
chainnet:
name: chainnet
driver: bridge
ipam:
config:
- subnet: 172.25.0.0/24
"""
)
def __export_config(self) -> None:
"""
        Writes configured yaml and genesis files to config folder for deployment
Returns: None
"""
# format yaml and add docker compose properties
final_str = textwrap.indent(f"""{self.__yaml}""", " ")
self.__yaml = textwrap.dedent(
f"""
version: "3.8"
name: blockchain
services:
"""
)
self.__yaml += final_str
# add network config last
self.__add_network()
with open(f"{self.__config_dir}/blockchain-docker-compose.yml", "w+") as file:
file.write(self.__yaml)
with open(f"{self.__input_dir}/geth/genesis.json", "w+") as file:
json.dump(self.__genesis, file, indent=4)
source = os.path.join(self.__input_dir, "geth", "genesis.json")
shutil.copy(source, os.path.join(self.__config_dir, "genesis.json"))
source = os.path.join(self.__input_dir, "chaincode", "reputation_system.sol")
shutil.copy(source, os.path.join(self.__config_dir, "reputation_system.sol"))
if __name__ == "__main__":
b = BlockchainDeployer(n_validator=3, config_dir=os.path.join("deployments", datetime.now().strftime("%Y-%m-%d_%H-%M")))
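# Illustrative follow-up (sketch, not part of the deployer itself): once the compose file and
# genesis.json have been written to config_dir, the network can typically be brought up with
# docker compose, e.g. (the exact path depends on the timestamped config_dir chosen above):
#   docker compose -f deployments/<timestamp>/blockchain-docker-compose.yml up -d --build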
| 12,599 | Python | .py | 288 | 31.388889 | 304 | 0.564614 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,950 | app.py | enriquetomasmb_nebula/nebula/addons/blockchain/oracle/app.py | import datetime
import os
import json
from functools import wraps
from typing import Mapping, List
import requests
from retry import retry
from solcx import compile_standard, install_solc
from web3 import Web3
from eth_account import Account
from flask import Flask, jsonify, request
from web3.middleware import construct_sign_and_send_raw_middleware
from web3.middleware import geth_poa_middleware
app = Flask(__name__)
def error_handler(func):
"""Adds default status and header to all REST responses used for Oracle"""
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs), 200, {"Content-Type": "application/json"}
except Exception as e:
return jsonify({"error": str(e)}), 500, {"Content-Type": "application/json"}
return wrapper
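# Illustrative usage of the decorator above (hedged sketch; the route name is hypothetical
# and not part of the original API). @error_handler must sit below @app.route so that Flask
# registers the already-wrapped view:
#
#   @app.route("/ping")
#   @error_handler
#   def ping():
#       return jsonify({"Message": "pong"})   # -> (body, 200, JSON header) on success,
#                                             #    (error JSON, 500) if an exception is raised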
class Oracle:
def __init__(self):
# header file, required for interacting with chain code
self.__contract_abi = dict()
# stores gas expenses for experiments
self.__gas_store = list()
# stores timing records for experiments
self.__time_store = list()
# stores reputation records for experiments
self.__reputation_store = list()
        # current (03.2024) average gas price in Gwei per unit of gas
self.__gas_price_per_unit = float(27.3)
# current (03.2024) average price in USD per WEI
self.__price_USD_per_WEI = float(0.00001971)
# static ip address of non-validator node (RPC)
self.__blockchain_address = "http://172.25.0.104:8545"
# executes RPC request to non-validator node until ready
self.__ready = self.wait_for_blockchain()
        # creates an account from the private key stored in the envs
self.acc = self.__create_account()
# create Web3 object for making transactions
self.__web3 = self.__initialize_web3()
# create a Web3 contract object from the compiled chaincode
self.contract_obj = self.__compile_chaincode()
# deploy the contract to the blockchain network
self.__contract_address = self.deploy_chaincode()
# update the contract object with the address
self.contract_obj = self.__web3.eth.contract(abi=self.contract_obj.abi, bytecode=self.contract_obj.bytecode, address=self.contract_address)
@property
def contract_abi(self):
return self.__contract_abi
@property
def contract_address(self):
return self.__contract_address
@retry((Exception, requests.exceptions.HTTPError), tries=20, delay=10)
def wait_for_blockchain(self) -> bool:
"""
Executes REST post request for a selected RPC method to check if blockchain
is up and running
        Returns: True once the blockchain RPC node responds
"""
headers = {"Content-type": "application/json", "Accept": "application/json"}
data = {"jsonrpc": "2.0", "method": "eth_accounts", "id": 1, "params": []}
        response = requests.post(url=self.__blockchain_address, json=data, headers=headers)
        # raise Exception if status is an error one
        response.raise_for_status()
print(f"ORACLE: RPC node up and running", flush=True)
return True
def __initialize_web3(self):
"""
Initializes Web3 object and configures it for PoA protocol
Returns: Web3 object
"""
# initialize Web3 object with ip of non-validator node
web3 = Web3(Web3.HTTPProvider(self.__blockchain_address, request_kwargs={"timeout": 20})) # 10
# inject Proof-of-Authority settings to object
web3.middleware_onion.inject(geth_poa_middleware, layer=0)
# automatically sign transactions if available for execution
web3.middleware_onion.add(construct_sign_and_send_raw_middleware(self.acc))
# inject local account as default
web3.eth.default_account = self.acc.address
# return initialized object for executing transaction
print(f"SUCCESS: Account created at {self.acc.address}")
return web3
def __compile_chaincode(self):
"""
Compile raw chaincode and create Web3 contract object with it
Returns: Web3 contract object
"""
# open raw solidity file
with open("reputation_system.sol", "r") as file:
simple_storage_file = file.read()
# set compiler version
install_solc("0.8.22")
# compile solidity code
compiled_sol = compile_standard(
{
"language": "Solidity",
"sources": {"reputation_system.sol": {"content": simple_storage_file}},
"settings": {"evmVersion": "paris", "outputSelection": {"*": {"*": ["abi", "metadata", "evm.bytecode", "evm.sourceMap"]}}, "optimizer": {"enabled": True, "runs": 1000}},
},
solc_version="0.8.22",
)
# store compiled code as json
with open("compiled_code.json", "w") as file:
json.dump(compiled_sol, file)
# retrieve bytecode from the compiled contract
contract_bytecode = compiled_sol["contracts"]["reputation_system.sol"]["ReputationSystem"]["evm"]["bytecode"]["object"]
# retrieve ABI from compiled contract
self.__contract_abi = json.loads(compiled_sol["contracts"]["reputation_system.sol"]["ReputationSystem"]["metadata"])["output"]["abi"]
print(f"Oracle: Solidity files compiled and bytecode ready", flush=True)
# return draft Web3 contract object
return self.__web3.eth.contract(abi=self.__contract_abi, bytecode=contract_bytecode)
@staticmethod
def __create_account():
"""
Retrieves the private key from the envs, set during docker build
Returns: Web3 account object
"""
        # retrieve private key, set during docker build
private_key = os.environ.get("PRIVATE_KEY")
# return Web3 account object
return Account.from_key("0x" + private_key)
@retry((Exception, requests.exceptions.HTTPError), tries=3, delay=4)
def transfer_funds(self, address):
"""
Creates transaction to blockchain network for assigning funds to Cores
Args:
address: public wallet address of Core to assign funds to
Returns: Transaction receipt
"""
# create raw transaction with all required parameters to change state of ledger
raw_transaction = {
"chainId": self.__web3.eth.chain_id,
"from": self.acc.address,
"value": self.__web3.to_wei("500", "ether"),
"to": self.__web3.to_checksum_address(address),
"nonce": self.__web3.eth.get_transaction_count(self.acc.address, "pending"),
"gasPrice": self.__web3.to_wei(self.__gas_price_per_unit, "gwei"),
"gas": self.__web3.to_wei("22000", "wei"),
}
# sign transaction with private key and execute it
tx_receipt = self.__sign_and_deploy(raw_transaction)
# return transaction receipt
return f"SUCESS: {tx_receipt}"
def __sign_and_deploy(self, trx_hash):
"""
        Signs a function call to the chain code with the private key and awaits the receipt
Args:
trx_hash: Transformed dictionary of all properties relevant for call to chain code
Returns: transaction receipt confirming the successful write to the ledger
"""
# transaction is signed with private key
signed_transaction = self.__web3.eth.account.sign_transaction(trx_hash, private_key=self.acc.key)
# confirmation that transaction was passed from non-validator node to validator nodes
executed_transaction = self.__web3.eth.send_raw_transaction(signed_transaction.rawTransaction)
# non-validator node awaited the successful validation by validation nodes and returns receipt
transaction_receipt = self.__web3.eth.wait_for_transaction_receipt(executed_transaction, timeout=20) # 5
# report used gas for experiment
self.report_gas(transaction_receipt.gasUsed, 0)
return transaction_receipt
@retry(Exception, tries=20, delay=5)
def deploy_chaincode(self):
"""
Creates transaction to deploy chain code on the blockchain network by
sending transaction to non-validator node
Returns: address of chain code on the network
"""
# create raw transaction with all properties to deploy contract
raw_transaction = self.contract_obj.constructor().build_transaction(
{"chainId": self.__web3.eth.chain_id, "from": self.acc.address, "value": self.__web3.to_wei("3", "ether"), "gasPrice": self.__web3.to_wei(self.__gas_price_per_unit, "gwei"), "nonce": self.__web3.eth.get_transaction_count(self.acc.address, "pending")}
)
# sign transaction with private key and executes it
tx_receipt = self.__sign_and_deploy(raw_transaction)
# store the address received from the non-validator node
contract_address = tx_receipt["contractAddress"]
# returns contract address to provide to the cores later
return contract_address
def get_balance(self, addr):
"""
Creates transaction to blockchain network to request balance for parameter address
Args:
addr: public wallet address of account
Returns: current balance in ether (ETH)
"""
# converts address type required for making a transaction
cAddr = self.__web3.to_checksum_address(addr)
# executes the transaction directly, no signing required
balance = self.__web3.eth.get_balance(cAddr, "pending")
# returns JSON response with ether balance to requesting core
return {"address": cAddr, "balance_eth": self.__web3.from_wei(balance, "ether")}
def report_gas(self, amount: int, aggregation_round: int) -> None:
"""
Experiment method for collecting and reporting gas usage statistics
Args:
aggregation_round: Aggregation round of sender
amount: Amount of gas spent in WEI
Returns: None
"""
# store the recorded gas for experiment
self.__gas_store.append((amount, aggregation_round))
def get_gas_report(self) -> Mapping[str, str]:
"""
Experiment method for requesting the summed up records of reported gas usage
Returns: JSON with name:value (WEI/USD) for every reported node
"""
# sum up all reported costs
total_wei = sum((record[0] for record in self.__gas_store))
# convert sum in WEI to USD by computing with gas price USD per WEI
total_usd = round(total_wei * self.__price_USD_per_WEI)
return {"Sum (WEI)": total_wei, "Sum (USD)": f"{total_usd:,}"}
@property
def gas_store(self):
"""
Experiment method for requesting the detailed records of the gas reports
        Returns: list of records of type: list[(amount, aggregation_round)]
"""
return self.__gas_store
def report_time(self, time_s: float, aggregation_round: int) -> None:
"""
Experiment method for collecting and reporting time statistics
Args:
            aggregation_round: Aggregation round of the reporting node
            time_s: Amount of time (in seconds) spent on the reported operation
Returns: None
"""
# store the recorded time for experiment
self.__time_store.append((time_s, aggregation_round))
def report_reputation(self, records: list, aggregation_round: int, sender: str) -> None:
"""
Experiment method for collecting and reporting reputations statistics
Args:
aggregation_round: Current aggregation round of sender
records: list of (name:reputation) records
sender: node reporting its local view
Returns: None
"""
# store the recorded reputation for experiment
self.__reputation_store.extend([(record[0], record[1], aggregation_round, sender) for record in records])
@property
def time_store(self) -> list:
"""
Experiment method for requesting all records of nodes which reported timings
        Returns: list of records of type: list[(time_s, aggregation_round)]
"""
return self.__time_store
@property
def reputation_store(self) -> list:
"""
Experiment method for requesting all records of reputations
        Returns: list with (name, reputation, aggregation_round, sender)
"""
return self.__reputation_store
@property
def ready(self) -> bool:
"""
Returns true if the Oracle is ready itself and the chain code was deployed successfully
        Returns: True if ready, False otherwise
"""
return self.__ready
@app.route("/")
@error_handler
def home():
return jsonify({"Message": "Oracle up and running"})
@app.route("/faucet", methods=["POST"])
@error_handler
def rest_transfer_funds():
address = request.get_json().get("address")
return jsonify({"Message": oracle.transfer_funds(address)})
@app.route("/gas", methods=["POST"])
@error_handler
def rest_report_gas():
amount = request.get_json().get("amount")
aggregation_round = request.get_json().get("round")
oracle.report_gas(amount, aggregation_round)
return jsonify(oracle.get_gas_report())
@app.route("/balance", methods=["GET"])
@error_handler
def rest_get_balance():
addr = request.get_json().get("address")
return jsonify(oracle.get_balance(addr))
@app.route("/status", methods=["GET"])
@error_handler
def rest_status():
if not oracle.ready:
return {"message": "Blockchain does not respond, wait 10"}
else:
return {"message": "Blockchain responded"}
@app.route("/contract", methods=["GET"])
@error_handler
def rest_contract():
return jsonify({"address": oracle.contract_address, "abi": oracle.contract_abi})
@app.route("/gas", methods=["GET"])
@error_handler
def rest_get_gas_report():
return oracle.get_gas_report()
@app.route("/gas_series", methods=["GET"])
@error_handler
def rest_get_gas_series():
return oracle.gas_store
@app.route("/time", methods=["POST"])
@error_handler
def rest_report_time():
time = request.get_json().get("time")
aggregation_round = request.get_json().get("round")
oracle.report_time(time, aggregation_round)
return jsonify({"Message": "Reported time successfully"})
@app.route("/time", methods=["GET"])
@error_handler
def rest_get_time_report():
return oracle.time_store
@app.route("/reputation", methods=["POST"])
@error_handler
def rest_report_reputation():
records = request.get_json().get("records")
round = request.get_json().get("round")
sender = request.get_json().get("sender")
oracle.report_reputation(records, round, sender)
return jsonify({"Message": "Reported reputation successfully"})
@app.route("/reputation", methods=["GET"])
@error_handler
def rest_get_reputation_timeseries():
return oracle.reputation_store
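# --- Illustrative client-side sketch (not part of the Oracle API itself) ---
# How a node could call the endpoints above over REST; the helper below is hypothetical,
# never invoked by this module, and assumes the container is reachable at
# http://<oracle-host>:8081 (see the docker-compose port mapping).
def _example_request_funds(oracle_url, wallet_address):
    # POST /faucet with the node's public wallet address and return the JSON reply
    response = requests.post(f"{oracle_url}/faucet", json={"address": wallet_address}, timeout=20)
    response.raise_for_status()
    return response.json()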
if __name__ == "__main__":
oracle = Oracle()
app.run(debug=False, host="0.0.0.0", port=8081)
| 15,259 | Python | .py | 330 | 38.475758 | 262 | 0.661036 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,951 | main.py | enriquetomasmb_nebula/nebula/tests/main.py | import json
import logging
import os
import sys
from datetime import datetime
import docker
# Constants
TIMEOUT = 3600
# Detect CTRL+C from parent process
def signal_handler(signal, frame):
logging.info("You pressed Ctrl+C [test]!")
sys.exit(0)
# Create nebula netbase if it does not exist
def create_docker_network():
client = docker.from_env()
try:
ipam_pool = docker.types.IPAMPool(subnet="192.168.10.0/24", gateway="192.168.10.1")
ipam_config = docker.types.IPAMConfig(pool_configs=[ipam_pool])
client.networks.create("nebula-net-base", driver="bridge", ipam=ipam_config)
print("Docker network created successfully.")
except docker.errors.APIError as e:
print(f"Error creating Docker network: {e}")
# To add a new test create the option in the menu and a [test].json file in tests folder
def menu():
# clear terminal
if os.name == "nt":
os.system("cls")
else:
os.system("clear")
banner = """
                ███╗   ██╗███████╗██████╗ ██╗   ██╗██╗      █████╗ 
                ████╗  ██║██╔════╝██╔══██╗██║   ██║██║     ██╔══██╗
                ██╔██╗ ██║█████╗  ██████╔╝██║   ██║██║     ███████║
                ██║╚██╗██║██╔══╝  ██╔══██╗██║   ██║██║     ██╔══██║
                ██║ ╚████║███████╗██████╔╝╚██████╔╝███████╗██║  ██║
                ╚═╝  ╚═══╝╚══════╝╚═════╝  ╚═════╝ ╚══════╝╚═╝  ╚═╝
A Platform for Decentralized Federated Learning
Created by Enrique Tomás Martínez Beltrán
https://github.com/enriquetomasmb/nebula
"""
print("\x1b[0;36m" + banner + "\x1b[0m")
options = """
[1] Aggregation test
[2] Topology test
[3] Dataset test
[4] Attacks test
[5] Custom test
CTRL + C to exit
"""
while True:
print("\x1b[0;36m" + options + "\x1b[0m")
selectedOption = input("\x1b[0;36m" + "> " + "\x1b[0m")
if selectedOption == "1":
run_test(os.path.join(os.path.dirname(__file__), "aggregation.json"))
elif selectedOption == "2":
run_test(os.path.join(os.path.dirname(__file__), "topology.json"))
elif selectedOption == "3":
run_test(os.path.join(os.path.dirname(__file__), "datasets.json"))
elif selectedOption == "4":
run_test(os.path.join(os.path.dirname(__file__), "attacks.json"))
elif selectedOption == "5":
run_test(os.path.join(os.path.dirname(__file__), "custom.json"))
else:
print("Choose a valid option")
# Check for error logs
def check_error_logs(test_name, scenario_name):
try:
log_dir = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "app", "logs"))
current_log = os.path.join(log_dir, scenario_name)
test_dir = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "app", "tests"))
output_log_path = os.path.join(test_dir, test_name + ".log")
if not os.path.exists(test_dir):
try:
os.mkdir(test_dir)
except Exception as e:
logging.error(f"Error creating test directory: {e}")
with open(output_log_path, "a", encoding="utf-8") as f:
f.write(f"Scenario: {scenario_name}\n")
for log_file in os.listdir(current_log):
if log_file.endswith("_error.log"):
log_file_path = os.path.join(current_log, log_file)
try:
with open(log_file_path, "r", encoding="utf-8") as file:
content = file.read().strip()
if content:
f.write(f"{log_file} ‚ùå Errors found:\n{content}\n")
else:
f.write(f"{log_file} ‚úÖ No errors found\n")
except Exception as e:
f.write(f"Error reading {log_file}: {e}\n")
f.write("-" * os.get_terminal_size().columns + "\n")
except Exception as e:
print(f"Failed to write to log file {test_name + '.log'}: {e}")
return output_log_path
# Load test from .json file
def load_test(test_path):
with open(test_path, "r", encoding="utf-8") as file:
scenarios = json.load(file)
return scenarios
# Run selected test
def run_test(test_path):
test_name = f"test_nebula_{os.path.splitext(os.path.basename(test_path))[0]}_" + datetime.now().strftime("%d_%m_%Y_%H_%M_%S")
for scenario in load_test(test_path):
scenarioManagement = run_scenario(scenario)
finished = scenarioManagement.scenario_finished(TIMEOUT)
if finished:
test_log_path = check_error_logs(test_name, scenarioManagement.scenario_name)
else:
test_dir = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "app", "tests"))
            test_log_path = os.path.join(test_dir, test_name + ".log")
if not os.path.exists(test_dir):
try:
os.mkdir(test_dir)
except Exception as e:
logging.error(f"Error creating test directory: {e}")
try:
with open(output_log_path, "a", encoding="utf-8") as f:
f.write(f"Scenario: {scenarioManagement.scenario_name} \n")
f.write(f"üïí‚ùå Timeout reached \n")
f.write("-" * os.get_terminal_size().columns + "\n")
except Exception as e:
print(f"Failed to write to log file {test_name + '.log'}: {e}")
pass
print("Results:")
try:
with open(test_log_path, "r", encoding="utf-8") as f:
print(f.read())
except Exception as e:
print(f"Failed to read the log file {test_name + '.log'}: {e}")
# Run a single scenario
def run_scenario(scenario):
from nebula.scenarios import ScenarioManagement
import subprocess
# Manager for the actual scenario
scenarioManagement = ScenarioManagement(scenario, "nebula-test")
# Run the actual scenario
try:
if scenarioManagement.scenario.mobility:
additional_participants = scenario["additional_participants"]
schema_additional_participants = scenario["schema_additional_participants"]
scenarioManagement.load_configurations_and_start_nodes(additional_participants, schema_additional_participants)
else:
scenarioManagement.load_configurations_and_start_nodes()
except subprocess.CalledProcessError as e:
logging.error(f"Error docker-compose up: {e}")
return scenarioManagement
if __name__ == "__main__":
create_docker_network()
menu()
| 7,178 | Python | .py | 149 | 39.073826 | 147 | 0.601545 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,952 | role.py | enriquetomasmb_nebula/nebula/core/role.py | class Role:
"""
This class defines the participant roles of the platform.
"""
TRAINER = "trainer"
AGGREGATOR = "aggregator"
PROXY = "proxy"
IDLE = "idle"
SERVER = "server"
| 205 | Python | .py | 9 | 18.111111 | 61 | 0.620513 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,953 | engine.py | enriquetomasmb_nebula/nebula/core/engine.py | import asyncio
import logging
import os
import docker
from nebula.addons.functions import print_msg_box
from nebula.addons.attacks.attacks import create_attack
from nebula.addons.reporter import Reporter
from nebula.core.aggregation.aggregator import create_aggregator, create_malicious_aggregator, create_target_aggregator
from nebula.core.eventmanager import EventManager, event_handler
from nebula.core.network.communications import CommunicationsManager
from nebula.core.pb import nebula_pb2
from nebula.core.utils.locker import Locker
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("fsspec").setLevel(logging.WARNING)
logging.getLogger("matplotlib").setLevel(logging.ERROR)
logging.getLogger("aim").setLevel(logging.ERROR)
logging.getLogger("plotly").setLevel(logging.ERROR)
from nebula.config.config import Config
from nebula.core.training.lightning import Lightning
from nebula.core.utils.helper import cosine_metric
import sys
import pdb
def handle_exception(exc_type, exc_value, exc_traceback):
logging.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
pdb.set_trace()
pdb.post_mortem(exc_traceback)
def signal_handler(sig, frame):
print("Signal handler called with signal", sig)
print("Exiting gracefully")
sys.exit(0)
def print_banner():
banner = """
                ███╗   ██╗███████╗██████╗ ██╗   ██╗██╗      █████╗ 
                ████╗  ██║██╔════╝██╔══██╗██║   ██║██║     ██╔══██╗
                ██╔██╗ ██║█████╗  ██████╔╝██║   ██║██║     ███████║
                ██║╚██╗██║██╔══╝  ██╔══██╗██║   ██║██║     ██╔══██║
                ██║ ╚████║███████╗██████╔╝╚██████╔╝███████╗██║  ██║
                ╚═╝  ╚═══╝╚══════╝╚═════╝  ╚═════╝ ╚══════╝╚═╝  ╚═╝
A Platform for Decentralized Federated Learning
Created by Enrique Tomás Martínez Beltrán
https://github.com/enriquetomasmb/nebula
"""
logging.info(f"\n{banner}\n")
class Engine:
def __init__(
self,
model,
dataset,
config=Config,
trainer=Lightning,
security=False,
model_poisoning=False,
poisoned_ratio=0,
noise_type="gaussian",
):
self.config = config
self.idx = config.participant["device_args"]["idx"]
self.experiment_name = config.participant["scenario_args"]["name"]
self.ip = config.participant["network_args"]["ip"]
self.port = config.participant["network_args"]["port"]
self.addr = config.participant["network_args"]["addr"]
self.role = config.participant["device_args"]["role"]
self.name = config.participant["device_args"]["name"]
self.docker_id = config.participant["device_args"]["docker_id"]
self.client = docker.from_env()
print_banner()
print_msg_box(msg=f"Name {self.name}\nRole: {self.role}", indent=2, title="Node information")
self._trainer = None
self._aggregator = None
self.round = None
self.total_rounds = None
self.federation_nodes = set()
self.initialized = False
self.log_dir = os.path.join(config.participant["tracking_args"]["log_dir"], self.experiment_name)
self.security = security
self.model_poisoning = model_poisoning
self.poisoned_ratio = poisoned_ratio
self.noise_type = noise_type
self._trainer = trainer(model, dataset, config=self.config)
self._aggregator = create_aggregator(config=self.config, engine=self)
self._secure_neighbors = []
self._is_malicious = True if self.config.participant["adversarial_args"]["attacks"] != "No Attack" else False
msg = f"Trainer: {self._trainer.__class__.__name__}"
msg += f"\nDataset: {self.config.participant['data_args']['dataset']}"
msg += f"\nIID: {self.config.participant['data_args']['iid']}"
msg += f"\nModel: {model.__class__.__name__}"
msg += f"\nAggregation algorithm: {self._aggregator.__class__.__name__}"
msg += f"\nNode behavior: {'malicious' if self._is_malicious else 'benign'}"
print_msg_box(msg=msg, indent=2, title="Scenario information")
print_msg_box(msg=f"Logging type: {self._trainer.logger.__class__.__name__}", indent=2, title="Logging information")
self.with_reputation = self.config.participant["defense_args"]["with_reputation"]
self.is_dynamic_topology = self.config.participant["defense_args"]["is_dynamic_topology"]
self.is_dynamic_aggregation = self.config.participant["defense_args"]["is_dynamic_aggregation"]
self.target_aggregation = create_target_aggregator(config=self.config, engine=self) if self.is_dynamic_aggregation else None
msg = f"Reputation system: {self.with_reputation}\nDynamic topology: {self.is_dynamic_topology}\nDynamic aggregation: {self.is_dynamic_aggregation}"
msg += f"\nTarget aggregation: {self.target_aggregation.__class__.__name__}" if self.is_dynamic_aggregation else ""
print_msg_box(msg=msg, indent=2, title="Defense information")
self.learning_cycle_lock = Locker(name="learning_cycle_lock", async_lock=True)
self.federation_setup_lock = Locker(name="federation_setup_lock", async_lock=True)
self.federation_ready_lock = Locker(name="federation_ready_lock", async_lock=True)
self.round_lock = Locker(name="round_lock", async_lock=True)
self.config.reload_config_file()
self._cm = CommunicationsManager(engine=self)
# Set the communication manager in the model (send messages from there)
self.trainer.model.set_communication_manager(self._cm)
self._reporter = Reporter(config=self.config, trainer=self.trainer, cm=self.cm)
self._event_manager = EventManager(
default_callbacks=[
self._discovery_discover_callback,
self._control_alive_callback,
self._connection_connect_callback,
self._connection_disconnect_callback,
self._federation_ready_callback,
self._start_federation_callback,
self._federation_models_included_callback,
]
)
# Register additional callbacks
self._event_manager.register_event((nebula_pb2.FederationMessage, nebula_pb2.FederationMessage.Action.REPUTATION), self._reputation_callback)
# ... add more callbacks here
@property
def cm(self):
return self._cm
@property
def reporter(self):
return self._reporter
@property
def event_manager(self):
return self._event_manager
@property
def aggregator(self):
return self._aggregator
def get_aggregator_type(self):
return type(self.aggregator)
@property
def trainer(self):
return self._trainer
def get_addr(self):
return self.addr
def get_config(self):
return self.config
def get_federation_nodes(self):
return self.federation_nodes
def get_initialization_status(self):
return self.initialized
def set_initialization_status(self, status):
self.initialized = status
def get_round(self):
return self.round
def get_federation_ready_lock(self):
return self.federation_ready_lock
def get_federation_setup_lock(self):
return self.federation_setup_lock
def get_round_lock(self):
return self.round_lock
@event_handler(nebula_pb2.DiscoveryMessage, nebula_pb2.DiscoveryMessage.Action.DISCOVER)
async def _discovery_discover_callback(self, source, message):
logging.info(f"üîç handle_discovery_message | Trigger | Received discovery message from {source} (network propagation)")
current_connections = await self.cm.get_addrs_current_connections(myself=True)
if source not in current_connections:
logging.info(f"üîç handle_discovery_message | Trigger | Connecting to {source} indirectly")
await self.cm.connect(source, direct=False)
async with self.cm.get_connections_lock():
if source in self.cm.connections:
# Update the latitude and longitude of the node (if already connected)
if message.latitude is not None and -90 <= message.latitude <= 90 and message.longitude is not None and -180 <= message.longitude <= 180:
self.cm.connections[source].update_geolocation(message.latitude, message.longitude)
else:
logging.warning(f"üîç Invalid geolocation received from {source}: latitude={message.latitude}, longitude={message.longitude}")
@event_handler(nebula_pb2.ControlMessage, nebula_pb2.ControlMessage.Action.ALIVE)
async def _control_alive_callback(self, source, message):
logging.info(f"üîß handle_control_message | Trigger | Received alive message from {source}")
current_connections = await self.cm.get_addrs_current_connections(myself=True)
if source in current_connections:
try:
await self.cm.health.alive(source)
except Exception as e:
logging.error(f"Error updating alive status in connection: {e}")
else:
logging.error(f"❗️ Connection {source} not found in connections...")
@event_handler(nebula_pb2.ConnectionMessage, nebula_pb2.ConnectionMessage.Action.CONNECT)
async def _connection_connect_callback(self, source, message):
logging.info(f"üîó handle_connection_message | Trigger | Received connection message from {source}")
current_connections = await self.cm.get_addrs_current_connections(myself=True)
if source not in current_connections:
logging.info(f"üîó handle_connection_message | Trigger | Connecting to {source}")
await self.cm.connect(source, direct=True)
@event_handler(nebula_pb2.ConnectionMessage, nebula_pb2.ConnectionMessage.Action.DISCONNECT)
async def _connection_disconnect_callback(self, source, message):
logging.info(f"üîó handle_connection_message | Trigger | Received disconnection message from {source}")
await self.cm.disconnect(source, mutual_disconnection=False)
@event_handler(nebula_pb2.FederationMessage, nebula_pb2.FederationMessage.Action.FEDERATION_READY)
async def _federation_ready_callback(self, source, message):
logging.info(f"üìù handle_federation_message | Trigger | Received ready federation message from {source}")
if self.config.participant["device_args"]["start"]:
logging.info(f"üìù handle_federation_message | Trigger | Adding ready connection {source}")
await self.cm.add_ready_connection(source)
@event_handler(nebula_pb2.FederationMessage, nebula_pb2.FederationMessage.Action.FEDERATION_START)
async def _start_federation_callback(self, source, message):
logging.info(f"üìù handle_federation_message | Trigger | Received start federation message from {source}")
await self.create_trainer_module()
@event_handler(nebula_pb2.FederationMessage, nebula_pb2.FederationMessage.Action.REPUTATION)
async def _reputation_callback(self, source, message):
malicious_nodes = message.arguments # List of malicious nodes
if self.with_reputation:
if len(malicious_nodes) > 0 and not self._is_malicious:
if self.is_dynamic_topology:
await self._disrupt_connection_using_reputation(malicious_nodes)
if self.is_dynamic_aggregation and self.aggregator != self.target_aggregation:
await self._dynamic_aggregator(self.aggregator.get_nodes_pending_models_to_aggregate(), malicious_nodes)
@event_handler(nebula_pb2.FederationMessage, nebula_pb2.FederationMessage.Action.FEDERATION_MODELS_INCLUDED)
async def _federation_models_included_callback(self, source, message):
logging.info(f"üìù handle_federation_message | Trigger | Received aggregation finished message from {source}")
try:
await self.cm.get_connections_lock().acquire_async()
if self.round is not None and source in self.cm.connections:
try:
if message is not None and len(message.arguments) > 0:
self.cm.connections[source].update_round(int(message.arguments[0])) if message.round in [self.round - 1, self.round] else None
except Exception as e:
logging.error(f"Error updating round in connection: {e}")
else:
logging.error(f"Connection not found for {source}")
except Exception as e:
logging.error(f"Error updating round in connection: {e}")
finally:
await self.cm.get_connections_lock().release_async()
async def create_trainer_module(self):
asyncio.create_task(self._start_learning())
logging.info(f"Started trainer module...")
async def start_communications(self):
logging.info(f"Neighbors: {self.config.participant['network_args']['neighbors']}")
logging.info(f"üí§ Cold start time: {self.config.participant['misc_args']['grace_time_connection']} seconds before connecting to the network")
await asyncio.sleep(self.config.participant["misc_args"]["grace_time_connection"])
await self.cm.start()
initial_neighbors = self.config.participant["network_args"]["neighbors"].split()
for i in initial_neighbors:
addr = f"{i.split(':')[0]}:{i.split(':')[1]}"
await self.cm.connect(addr, direct=True)
await asyncio.sleep(1)
while not self.cm.verify_connections(initial_neighbors):
await asyncio.sleep(1)
current_connections = await self.cm.get_addrs_current_connections()
logging.info(f"Connections verified: {current_connections}")
await self._reporter.start()
await self.cm.deploy_additional_services()
await asyncio.sleep(self.config.participant["misc_args"]["grace_time_connection"] // 2)
async def deploy_federation(self):
await self.federation_ready_lock.acquire_async()
if self.config.participant["device_args"]["start"]:
logging.info(f"üí§ Waiting for {self.config.participant['misc_args']['grace_time_start_federation']} seconds to start the federation")
await asyncio.sleep(self.config.participant["misc_args"]["grace_time_start_federation"])
if self.round is None:
while not await self.cm.check_federation_ready():
await asyncio.sleep(1)
logging.info(f"Sending FEDERATION_START to neighbors...")
message = self.cm.mm.generate_federation_message(nebula_pb2.FederationMessage.Action.FEDERATION_START)
await self.cm.send_message_to_neighbors(message)
await self.get_federation_ready_lock().release_async()
await self.create_trainer_module()
else:
logging.info(f"Federation already started")
else:
logging.info(f"Sending FEDERATION_READY to neighbors...")
message = self.cm.mm.generate_federation_message(nebula_pb2.FederationMessage.Action.FEDERATION_READY)
await self.cm.send_message_to_neighbors(message)
logging.info(f"üí§ Waiting until receiving the start signal from the start node")
async def _start_learning(self):
await self.learning_cycle_lock.acquire_async()
try:
if self.round is None:
self.total_rounds = self.config.participant["scenario_args"]["rounds"]
epochs = self.config.participant["training_args"]["epochs"]
await self.get_round_lock().acquire_async()
self.round = 0
await self.get_round_lock().release_async()
await self.learning_cycle_lock.release_async()
print_msg_box(msg=f"Starting Federated Learning process...", indent=2, title="Start of the experiment")
direct_connections = await self.cm.get_addrs_current_connections(only_direct=True)
undirected_connections = await self.cm.get_addrs_current_connections(only_undirected=True)
logging.info(f"Initial DIRECT connections: {direct_connections} | Initial UNDIRECT participants: {undirected_connections}")
logging.info(f"üí§ Waiting initialization of the federation...")
# Lock to wait for the federation to be ready (only affects the first round, when the learning starts)
# Only applies to non-start nodes --> start node does not wait for the federation to be ready
await self.get_federation_ready_lock().acquire_async()
if self.config.participant["device_args"]["start"]:
logging.info(f"Propagate initial model updates.")
await self.cm.propagator.propagate("initialization")
await self.get_federation_ready_lock().release_async()
self.trainer.set_epochs(epochs)
self.trainer.create_trainer()
await self._learning_cycle()
else:
if await self.learning_cycle_lock.locked_async():
await self.learning_cycle_lock.release_async()
finally:
if await self.learning_cycle_lock.locked_async():
await self.learning_cycle_lock.release_async()
async def _disrupt_connection_using_reputation(self, malicious_nodes):
malicious_nodes = list(set(malicious_nodes) & set(self.get_current_connections()))
logging.info(f"Disrupting connection with malicious nodes at round {self.round}")
logging.info(f"Removing {malicious_nodes} from {self.get_current_connections()}")
logging.info(f"Current connections before aggregation at round {self.round}: {self.get_current_connections()}")
for malicious_node in malicious_nodes:
if (self.get_name() != malicious_node) and (malicious_node not in self._secure_neighbors):
await self.cm.disconnect(malicious_node)
logging.info(f"Current connections after aggregation at round {self.round}: {self.get_current_connections()}")
await self._connect_with_benign(malicious_nodes)
async def _connect_with_benign(self, malicious_nodes):
lower_threshold = 1
higher_threshold = len(self.federation_nodes) - 1
if higher_threshold < lower_threshold:
higher_threshold = lower_threshold
benign_nodes = [i for i in self.federation_nodes if i not in malicious_nodes]
logging.info(f"_reputation_callback benign_nodes at round {self.round}: {benign_nodes}")
if len(self.get_current_connections()) <= lower_threshold:
for node in benign_nodes:
if len(self.get_current_connections()) <= higher_threshold and self.get_name() != node:
connected = await self.cm.connect(node)
if connected:
logging.info(f"Connect new connection with at round {self.round}: {connected}")
async def _dynamic_aggregator(self, aggregated_models_weights, malicious_nodes):
logging.info(f"malicious detected at round {self.round}, change aggergation protocol!")
if self.aggregator != self.target_aggregation:
logging.info(f"Current aggregator is: {self.aggregator}")
self.aggregator = self.target_aggregation
await self.aggregator.update_federation_nodes(self.federation_nodes)
for subnodes in aggregated_models_weights.keys():
sublist = subnodes.split()
(submodel, weights) = aggregated_models_weights[subnodes]
for node in sublist:
if node not in malicious_nodes:
await self.aggregator.include_model_in_buffer(submodel, weights, source=self.get_name(), round=self.round)
logging.info(f"Current aggregator is: {self.aggregator}")
async def _waiting_model_updates(self):
logging.info(f"üí§ Waiting convergence in round {self.round}.")
params = await self.aggregator.get_aggregation()
if params is not None:
logging.info(f"_waiting_model_updates | Aggregation done for round {self.round}, including parameters in local model.")
self.trainer.set_model_parameters(params)
else:
logging.error(f"Aggregation finished with no parameters")
async def _learning_cycle(self):
while self.round is not None and self.round < self.total_rounds:
print_msg_box(msg=f"Round {self.round} of {self.total_rounds} started.", indent=2, title="Round information")
self.trainer.on_round_start()
self.federation_nodes = await self.cm.get_addrs_current_connections(only_direct=True, myself=True)
logging.info(f"Federation nodes: {self.federation_nodes}")
direct_connections = await self.cm.get_addrs_current_connections(only_direct=True)
undirected_connections = await self.cm.get_addrs_current_connections(only_undirected=True)
logging.info(f"Direct connections: {direct_connections} | Undirected connections: {undirected_connections}")
logging.info(f"[Role {self.role}] Starting learning cycle...")
await self.aggregator.update_federation_nodes(self.federation_nodes)
await self._extended_learning_cycle()
await self.get_round_lock().acquire_async()
print_msg_box(msg=f"Round {self.round} of {self.total_rounds} finished.", indent=2, title="Round information")
await self.aggregator.reset()
self.trainer.on_round_end()
self.round = self.round + 1
self.config.participant["federation_args"]["round"] = self.round # Set current round in config (send to the controller)
await self.get_round_lock().release_async()
# End of the learning cycle
self.trainer.on_learning_cycle_end()
await self.trainer.test()
self.round = None
self.total_rounds = None
print_msg_box(msg=f"Federated Learning process has been completed.", indent=2, title="End of the experiment")
# Report
if self.config.participant["scenario_args"]["controller"] != "nebula-test":
result = await self.reporter.report_scenario_finished()
if result:
pass
else:
logging.error(f"Error reporting scenario finished")
logging.info(f"Checking if all my connections reached the total rounds...")
while not self.cm.check_finished_experiment():
await asyncio.sleep(1)
        # Disable logging for the rest of the shutdown
logging.getLogger().disabled = True
# Kill itself
if self.config.participant["scenario_args"]["deployment"] == "docker":
try:
self.client.containers.get(self.docker_id).stop()
except Exception as e:
print(f"Error stopping Docker container with ID {self.docker_id}: {e}")
async def _extended_learning_cycle(self):
"""
This method is called in each round of the learning cycle. It is used to extend the learning cycle with additional
functionalities. The method is called in the _learning_cycle method.
"""
pass
def reputation_calculation(self, aggregated_models_weights):
cossim_threshold = 0.5
loss_threshold = 0.5
current_models = {}
for subnodes in aggregated_models_weights.keys():
sublist = subnodes.split()
submodel = aggregated_models_weights[subnodes][0]
for node in sublist:
current_models[node] = submodel
malicious_nodes = []
reputation_score = {}
local_model = self.trainer.get_model_parameters()
untrusted_nodes = list(current_models.keys())
logging.info(f"reputation_calculation untrusted_nodes at round {self.round}: {untrusted_nodes}")
for untrusted_node in untrusted_nodes:
logging.info(f"reputation_calculation untrusted_node at round {self.round}: {untrusted_node}")
logging.info(f"reputation_calculation self.get_name() at round {self.round}: {self.get_name()}")
if untrusted_node != self.get_name():
untrusted_model = current_models[untrusted_node]
cossim = cosine_metric(local_model, untrusted_model, similarity=True)
logging.info(f"reputation_calculation cossim at round {self.round}: {untrusted_node}: {cossim}")
self.trainer._logger.log_data({f"Reputation/cossim_{untrusted_node}": cossim}, step=self.round)
avg_loss = self.trainer.validate_neighbour_model(untrusted_model)
logging.info(f"reputation_calculation avg_loss at round {self.round} {untrusted_node}: {avg_loss}")
self.trainer._logger.log_data({f"Reputation/avg_loss_{untrusted_node}": avg_loss}, step=self.round)
reputation_score[untrusted_node] = (cossim, avg_loss)
if cossim < cossim_threshold or avg_loss > loss_threshold:
malicious_nodes.append(untrusted_node)
else:
self._secure_neighbors.append(untrusted_node)
return malicious_nodes, reputation_score
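    # Worked example (hypothetical values): with cossim_threshold = 0.5 and loss_threshold = 0.5,
    # a neighbour whose model has cosine similarity 0.3 to the local model, or whose validation
    # loss is 0.8, is flagged as malicious; a neighbour with cossim 0.9 and loss 0.2 is added
    # to the secure neighbours instead.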
async def send_reputation(self, malicious_nodes):
logging.info(f"Sending REPUTATION to the rest of the topology: {malicious_nodes}")
message = self.cm.mm.generate_federation_message(nebula_pb2.FederationMessage.Action.REPUTATION, malicious_nodes)
await self.cm.send_message_to_neighbors(message)
class MaliciousNode(Engine):
def __init__(self, model, dataset, config=Config, trainer=Lightning, security=False, model_poisoning=False, poisoned_ratio=0, noise_type="gaussian"):
super().__init__(model, dataset, config, trainer, security, model_poisoning, poisoned_ratio, noise_type)
self.attack = create_attack(config.participant["adversarial_args"]["attacks"])
self.fit_time = 0.0
self.extra_time = 0.0
self.round_start_attack = 3
self.round_stop_attack = 6
        self.aggregator_benign = self._aggregator
async def _extended_learning_cycle(self):
        if self.attack is not None:
if self.round in range(self.round_start_attack, self.round_stop_attack):
logging.info(f"Changing aggregation function maliciously...")
self._aggregator = create_malicious_aggregator(self._aggregator, self.attack)
elif self.round == self.round_stop_attack:
logging.info(f"Changing aggregation function benignly...")
                self._aggregator = self.aggregator_benign
if self.role == "aggregator":
await AggregatorNode._extended_learning_cycle(self)
if self.role == "trainer":
await TrainerNode._extended_learning_cycle(self)
if self.role == "server":
await ServerNode._extended_learning_cycle(self)
class AggregatorNode(Engine):
def __init__(self, model, dataset, config=Config, trainer=Lightning, security=False, model_poisoning=False, poisoned_ratio=0, noise_type="gaussian"):
super().__init__(model, dataset, config, trainer, security, model_poisoning, poisoned_ratio, noise_type)
async def _extended_learning_cycle(self):
# Define the functionality of the aggregator node
await self.trainer.test()
await self.trainer.train()
await self.aggregator.include_model_in_buffer(self.trainer.get_model_parameters(), self.trainer.get_model_weight(), source=self.addr, round=self.round)
await self.cm.propagator.propagate("stable")
await self._waiting_model_updates()
class ServerNode(Engine):
def __init__(self, model, dataset, config=Config, trainer=Lightning, security=False, model_poisoning=False, poisoned_ratio=0, noise_type="gaussian"):
super().__init__(model, dataset, config, trainer, security, model_poisoning, poisoned_ratio, noise_type)
async def _extended_learning_cycle(self):
# Define the functionality of the server node
await self.trainer.test()
        # In the first round, the server node does not take into account the initial model parameters for the aggregation
await self.aggregator.include_model_in_buffer(self.trainer.get_model_parameters(), self.trainer.BYPASS_MODEL_WEIGHT, source=self.addr, round=self.round)
await self._waiting_model_updates()
await self.cm.propagator.propagate("stable")
class TrainerNode(Engine):
def __init__(self, model, dataset, config=Config, trainer=Lightning, security=False, model_poisoning=False, poisoned_ratio=0, noise_type="gaussian"):
super().__init__(model, dataset, config, trainer, security, model_poisoning, poisoned_ratio, noise_type)
async def _extended_learning_cycle(self):
# Define the functionality of the trainer node
logging.info(f"Waiting global update | Assign _waiting_global_update = True")
self.aggregator.set_waiting_global_update()
await self.trainer.test()
await self.trainer.train()
await self.aggregator.include_model_in_buffer(
self.trainer.get_model_parameters(), self.trainer.get_model_weight(), source=self.addr, round=self.round, local=True
)
await self.cm.propagator.propagate("stable")
await self._waiting_model_updates()
class IdleNode(Engine):
def __init__(self, model, dataset, config=Config, trainer=Lightning, security=False, model_poisoning=False, poisoned_ratio=0, noise_type="gaussian"):
super().__init__(model, dataset, config, trainer, security, model_poisoning, poisoned_ratio, noise_type)
async def _extended_learning_cycle(self):
# Define the functionality of the idle node
logging.info(f"Waiting global update | Assign _waiting_global_update = True")
self.aggregator.set_waiting_global_update()
await self._waiting_model_updates()
| 31,053 | Python | .py | 495 | 52.127273 | 167 | 0.66914 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,954 | eventmanager.py | enriquetomasmb_nebula/nebula/core/eventmanager.py | import asyncio
from collections import defaultdict
from functools import wraps
import inspect
import logging
def event_handler(message_type, action):
"""Decorator for registering an event handler."""
def decorator(func):
@wraps(func)
async def async_wrapper(*args, **kwargs):
return await func(*args, **kwargs)
@wraps(func)
def sync_wrapper(*args, **kwargs):
return func(*args, **kwargs)
if asyncio.iscoroutinefunction(func):
wrapper = async_wrapper
else:
wrapper = sync_wrapper
action_name = message_type.Action.Name(action) if action is not None else "None"
wrapper._event_handler = (message_type.DESCRIPTOR.full_name, action_name)
return wrapper
return decorator
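# Illustrative usage (hedged sketch; the handler below is hypothetical, the message type and
# action are taken from nebula_pb2 as used elsewhere in the project):
#
#   @event_handler(nebula_pb2.ControlMessage, nebula_pb2.ControlMessage.Action.ALIVE)
#   async def on_alive(source, message):
#       logging.info(f"{source} is alive")
#
#   manager = EventManager(default_callbacks=[on_alive])
#   # later, when a ControlMessage with action ALIVE arrives:
#   # await manager.trigger_event(source, message)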
class EventManager:
def __init__(self, default_callbacks=None):
self._event_callbacks = defaultdict(list)
self._register_default_callbacks(default_callbacks or [])
def _register_default_callbacks(self, default_callbacks):
"""Registers default callbacks for events."""
for callback in default_callbacks:
handler_info = getattr(callback, "_event_handler", None)
if handler_info is not None:
self.register_event(handler_info, callback)
else:
raise ValueError("The callback must be decorated with @event_handler.")
def register_event(self, handler_info, callback):
"""Records a callback for a specific event."""
if callable(callback):
self._event_callbacks[handler_info].append(callback)
else:
raise ValueError("The callback must be a callable function.")
def unregister_event(self, handler_info, callback):
"""Unregisters a previously registered callback for an event."""
if callback in self._event_callbacks[handler_info]:
self._event_callbacks[handler_info].remove(callback)
async def trigger_event(self, source, message, *args, **kwargs):
"""Triggers an event, executing all associated callbacks."""
message_type = message.DESCRIPTOR.full_name
if hasattr(message, "action"):
action_name = message.Action.Name(message.action)
else:
action_name = "None"
handler_info = (message_type, action_name)
if handler_info in self._event_callbacks:
for callback in self._event_callbacks[handler_info]:
try:
if asyncio.iscoroutinefunction(callback) or inspect.iscoroutine(callback):
await callback(source, message, *args, **kwargs)
else:
callback(source, message, *args, **kwargs)
except Exception as e:
logging.error(f"Error executing callback for {handler_info}: {e}")
else:
logging.error(f"No callbacks registered for event {handler_info}")
async def get_event_callbacks(self, event_name):
"""Returns the callbacks for a specific event."""
return self._event_callbacks[event_name]
def get_event_callbacks_names(self):
"""Returns the names of the registered events."""
return self._event_callbacks.keys()
| 3,290 | Python | .py | 69 | 37.550725 | 94 | 0.641898 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,955 | aggregator.py | enriquetomasmb_nebula/nebula/core/aggregation/aggregator.py | from abc import ABC, abstractmethod
import asyncio
from functools import partial
import logging
from nebula.core.utils.locker import Locker
from nebula.core.pb import nebula_pb2
class AggregatorException(Exception):
pass
def create_aggregator(config, engine):
from nebula.core.aggregation.fedavg import FedAvg
from nebula.core.aggregation.krum import Krum
from nebula.core.aggregation.median import Median
from nebula.core.aggregation.trimmedmean import TrimmedMean
from nebula.core.aggregation.blockchainReputation import BlockchainReputation
ALGORITHM_MAP = {
"FedAvg": FedAvg,
"Krum": Krum,
"Median": Median,
"TrimmedMean": TrimmedMean,
"BlockchainReputation": BlockchainReputation,
}
algorithm = config.participant["aggregator_args"]["algorithm"]
aggregator = ALGORITHM_MAP.get(algorithm)
if aggregator:
return aggregator(config=config, engine=engine)
else:
raise AggregatorException(f"Aggregation algorithm {algorithm} not found.")
def create_target_aggregator(config, engine):
from nebula.core.aggregation.fedavg import FedAvg
from nebula.core.aggregation.krum import Krum
from nebula.core.aggregation.median import Median
from nebula.core.aggregation.trimmedmean import TrimmedMean
ALGORITHM_MAP = {
"FedAvg": FedAvg,
"Krum": Krum,
"Median": Median,
"TrimmedMean": TrimmedMean,
}
algorithm = config.participant["defense_args"]["target_aggregation"]
aggregator = ALGORITHM_MAP.get(algorithm)
if aggregator:
return aggregator(config=config, engine=engine)
else:
raise AggregatorException(f"Aggregation algorithm {algorithm} not found.")
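# Illustrative (hedged) sketch of how the factories above are driven by the participant
# configuration; the JSON fragment is an assumption about the config layout, based only on
# the keys read in this module:
#
#   "aggregator_args": {"algorithm": "FedAvg", "aggregation_timeout": 60},
#   "defense_args":    {"target_aggregation": "TrimmedMean", ...}
#
# create_aggregator(config, engine) then returns a FedAvg instance bound to the engine, while
# create_target_aggregator(config, engine) returns the fallback TrimmedMean aggregator.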
class Aggregator(ABC):
def __init__(self, config=None, engine=None):
self.config = config
self.engine = engine
self._addr = config.participant["network_args"]["addr"]
logging.info(f"[{self.__class__.__name__}] Starting Aggregator")
self._federation_nodes = set()
self._waiting_global_update = False
self._pending_models_to_aggregate = {}
self._future_models_to_aggregate = {}
self._add_model_lock = Locker(name="add_model_lock", async_lock=True)
self._aggregation_done_lock = Locker(name="aggregation_done_lock", async_lock=True)
def __str__(self):
return self.__class__.__name__
def __repr__(self):
return self.__str__()
@property
def cm(self):
return self.engine.cm
@abstractmethod
def run_aggregation(self, models):
if len(models) == 0:
logging.error("Trying to aggregate models when there are no models")
return None
async def update_federation_nodes(self, federation_nodes):
if not self._aggregation_done_lock.locked():
self._federation_nodes = federation_nodes
self._pending_models_to_aggregate.clear()
await self._aggregation_done_lock.acquire_async(timeout=self.config.participant["aggregator_args"]["aggregation_timeout"])
else:
raise Exception("It is not possible to set nodes to aggregate when the aggregation is running.")
def set_waiting_global_update(self):
self._waiting_global_update = True
async def reset(self):
await self._add_model_lock.acquire_async()
self._federation_nodes.clear()
self._pending_models_to_aggregate.clear()
try:
await self._aggregation_done_lock.release_async()
except:
pass
await self._add_model_lock.release_async()
def get_nodes_pending_models_to_aggregate(self):
return {node for key in self._pending_models_to_aggregate.keys() for node in key.split()}
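    # Example of the buffer key convention (hypothetical addresses): a key may hold several
    # space-separated node addresses, e.g.
    #   {"192.168.10.2:45000 192.168.10.3:45000": (model, 2)}
    # yields {"192.168.10.2:45000", "192.168.10.3:45000"} from the method above.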
async def _handle_global_update(self, model, source):
logging.info(f"🔄 _handle_global_update | source={source}")
logging.info(f"🔄 _handle_global_update | Received a model from {source}. Overwriting __models with the aggregated model.")
self._pending_models_to_aggregate.clear()
self._pending_models_to_aggregate = {source: (model, 1)}
self._waiting_global_update = False
await self._add_model_lock.release_async()
await self._aggregation_done_lock.release_async()
async def _add_pending_model(self, model, weight, source):
if len(self._federation_nodes) <= len(self.get_nodes_pending_models_to_aggregate()):
logging.info(f"🔄 _add_pending_model | Ignoring model...")
await self._add_model_lock.release_async()
return None
if source not in self._federation_nodes:
logging.info(f"🔄 _add_pending_model | Can't add a model from ({source}), which is not in the federation.")
await self._add_model_lock.release_async()
return None
elif source not in self.get_nodes_pending_models_to_aggregate():
logging.info(f"🔄 _add_pending_model | Node is not in the aggregation buffer --> Include model in the aggregation buffer.")
self._pending_models_to_aggregate.update({source: (model, weight)})
logging.info(
f"🔄 _add_pending_model | Model added in aggregation buffer ({str(len(self.get_nodes_pending_models_to_aggregate()))}/{str(len(self._federation_nodes))}) | Pending nodes: {self._federation_nodes - self.get_nodes_pending_models_to_aggregate()}"
)
# Check if _future_models_to_aggregate has models in the current round to include in the aggregation buffer
if self.engine.get_round() in self._future_models_to_aggregate:
logging.info(f"🔄 _add_pending_model | Including next models in the aggregation buffer for round {self.engine.get_round()}")
for future_model in self._future_models_to_aggregate[self.engine.get_round()]:
if future_model is None:
continue
future_model, future_weight, future_source = future_model
if future_source in self._federation_nodes and future_source not in self.get_nodes_pending_models_to_aggregate():
self._pending_models_to_aggregate.update({future_source: (future_model, future_weight)})
logging.info(
f"🔄 _add_pending_model | Next model added in aggregation buffer ({str(len(self.get_nodes_pending_models_to_aggregate()))}/{str(len(self._federation_nodes))}) | Pending nodes: {self._federation_nodes - self.get_nodes_pending_models_to_aggregate()}"
)
del self._future_models_to_aggregate[self.engine.get_round()]
for future_round in list(self._future_models_to_aggregate.keys()):
if future_round < self.engine.get_round():
del self._future_models_to_aggregate[future_round]
if len(self.get_nodes_pending_models_to_aggregate()) >= len(self._federation_nodes):
logging.info(f"🔄 _add_pending_model | All models were added in the aggregation buffer. Run aggregation...")
await self._aggregation_done_lock.release_async()
await self._add_model_lock.release_async()
return self.get_nodes_pending_models_to_aggregate()
async def include_model_in_buffer(self, model, weight, source=None, round=None, local=False):
await self._add_model_lock.acquire_async()
logging.info(
f"🔄 include_model_in_buffer | source={source} | round={round} | weight={weight} |--| __models={self._pending_models_to_aggregate.keys()} | federation_nodes={self._federation_nodes} | pending_models_to_aggregate={self.get_nodes_pending_models_to_aggregate()}"
)
if model is None:
logging.info(f"🔄 include_model_in_buffer | Ignoring model bad formed...")
await self._add_model_lock.release_async()
return
if round == -1:
# Be sure that the model message is not from the initialization round (round = -1)
logging.info(f"🔄 include_model_in_buffer | Ignoring model with round -1")
await self._add_model_lock.release_async()
return
if self._waiting_global_update and not local:
await self._handle_global_update(model, source)
return
await self._add_pending_model(model, weight, source)
if len(self.get_nodes_pending_models_to_aggregate()) >= len(self._federation_nodes):
logging.info(f"🔄 include_model_in_buffer | Broadcasting MODELS_INCLUDED for round {self.engine.get_round()}")
message = self.cm.mm.generate_federation_message(nebula_pb2.FederationMessage.Action.FEDERATION_MODELS_INCLUDED, [self.engine.get_round()])
await self.cm.send_message_to_neighbors(message)
return
async def get_aggregation(self):
try:
timeout = self.config.participant["aggregator_args"]["aggregation_timeout"]
await self._aggregation_done_lock.acquire_async(timeout=timeout)
except asyncio.TimeoutError:
logging.error(f"🔄 get_aggregation | Timeout reached for aggregation")
finally:
await self._aggregation_done_lock.release_async()
if self._waiting_global_update and len(self._pending_models_to_aggregate) == 1:
logging.info(f"🔄 get_aggregation | Received an global model. Overwriting my model with the aggregated model.")
aggregated_model = next(iter(self._pending_models_to_aggregate.values()))[0]
self._pending_models_to_aggregate.clear()
return aggregated_model
unique_nodes_involved = set(node for key in self._pending_models_to_aggregate for node in key.split())
if len(unique_nodes_involved) != len(self._federation_nodes):
missing_nodes = self._federation_nodes - unique_nodes_involved
logging.info(f"🔄 get_aggregation | Aggregation incomplete, missing models from: {missing_nodes}")
else:
logging.info(f"🔄 get_aggregation | All models accounted for, proceeding with aggregation.")
aggregated_result = self.run_aggregation(self._pending_models_to_aggregate)
self._pending_models_to_aggregate.clear()
return aggregated_result
async def include_next_model_in_buffer(self, model, weight, source=None, round=None):
logging.info(f"🔄 include_next_model_in_buffer | source={source} | round={round} | weight={weight}")
if round not in self._future_models_to_aggregate:
self._future_models_to_aggregate[round] = []
decoded_model = self.engine.trainer.deserialize_model(model)
self._future_models_to_aggregate[round].append((decoded_model, weight, source))
def print_model_size(self, model):
total_params = 0
total_memory = 0
for _, param in model.items():
num_params = param.numel()
total_params += num_params
memory_usage = param.element_size() * num_params
total_memory += memory_usage
total_memory_in_mb = total_memory / (1024**2)
logging.info(f"print_model_size | Model size: {total_memory_in_mb} MB")
def create_malicious_aggregator(aggregator, attack):
    # Keep a reference to the original run_aggregation method so the attack can be applied to its output.
    run_aggregation = partial(aggregator.run_aggregation)
    # This function will replace the original run_aggregation method of the aggregator.
def malicious_aggregate(self, models):
accum = run_aggregation(models)
logging.info(f"malicious_aggregate | original aggregation result={accum}")
if models is not None:
accum = attack(accum)
logging.info(f"malicious_aggregate | attack aggregation result={accum}")
return accum
aggregator.run_aggregation = partial(malicious_aggregate, aggregator)
return aggregator
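# Illustrative sketch (hypothetical, not from the original NEBULA code base): a minimal
# example of the attack callback expected by create_malicious_aggregator(). It receives
# the aggregated state dict and returns a perturbed one; the helper name is made up.
def _example_noise_attack(aggregated_model):
    import torch  # local import so the sketch stays self-contained
    # add small Gaussian noise to every layer of the aggregated model
    return {layer: param + 0.1 * torch.randn_like(param) for layer, param in aggregated_model.items()}
# Usage sketch: aggregator = create_malicious_aggregator(aggregator, _example_noise_attack)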
| 12,033 | Python | .py | 206 | 48.800971 | 275 | 0.670919 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,956 | fedavg.py | enriquetomasmb_nebula/nebula/core/aggregation/fedavg.py | import gc
import torch
from nebula.core.aggregation.aggregator import Aggregator
class FedAvg(Aggregator):
"""
Aggregator: Federated Averaging (FedAvg)
Authors: McMahan et al.
Year: 2016
"""
def __init__(self, config=None, **kwargs):
super().__init__(config, **kwargs)
def run_aggregation(self, models):
super().run_aggregation(models)
models = list(models.values())
total_samples = float(sum(weight for _, weight in models))
if total_samples == 0:
raise ValueError("Total number of samples must be greater than zero.")
last_model_params = models[-1][0]
accum = {layer: torch.zeros_like(param, dtype=torch.float32) for layer, param in last_model_params.items()}
with torch.no_grad():
for model_parameters, weight in models:
normalized_weight = weight / total_samples
for layer in accum:
accum[layer].add_(model_parameters[layer].to(accum[layer].dtype), alpha=normalized_weight)
del models
gc.collect()
# self.print_model_size(accum)
return accum
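# Illustrative sketch (hypothetical toy data, not from the original file): the weighted
# average computed by run_aggregation above, worked on two single-layer models that
# report 30 and 10 samples respectively.
if __name__ == "__main__":
    _models = {"node-a": ({"w": torch.tensor([1.0, 1.0])}, 30), "node-b": ({"w": torch.tensor([3.0, 3.0])}, 10)}
    _total = float(sum(weight for _, weight in _models.values()))
    _accum = torch.zeros(2)
    for _params, _weight in _models.values():
        # each model contributes proportionally to its number of samples
        _accum += _params["w"] * (_weight / _total)
    print(_accum)  # 0.75 * [1, 1] + 0.25 * [3, 3] -> tensor([1.5000, 1.5000])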
| 1,185 | Python | .py | 28 | 32.785714 | 115 | 0.635063 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,957 | krum.py | enriquetomasmb_nebula/nebula/core/aggregation/krum.py | import torch
import numpy
from nebula.core.aggregation.aggregator import Aggregator
class Krum(Aggregator):
"""
Aggregator: Krum
Authors: Peva Blanchard et al.
Year: 2017
Note: https://papers.nips.cc/paper/2017/hash/f4b9ec30ad9f68f89b29639786cb62ef-Abstract.html
"""
def __init__(self, config=None, **kwargs):
super().__init__(config, **kwargs)
def run_aggregation(self, models):
super().run_aggregation(models)
models = list(models.values())
accum = {layer: torch.zeros_like(param).float() for layer, param in models[-1][0].items()}
total_models = len(models)
distance_list = [0 for i in range(0, total_models)]
min_index = 0
min_distance_sum = float("inf")
for i in range(0, total_models):
m1, _ = models[i]
for j in range(0, total_models):
m2, _ = models[j]
distance = 0
if i == j:
distance = 0
else:
for layer in m1:
l1 = m1[layer]
l2 = m2[layer]
distance += numpy.linalg.norm(l1 - l2)
distance_list[i] += distance
if min_distance_sum > distance_list[i]:
min_distance_sum = distance_list[i]
min_index = i
m, _ = models[min_index]
for layer in m:
accum[layer] = accum[layer] + m[layer]
return accum
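# Illustrative sketch (hypothetical toy data, not from the original file): Krum scores
# each model by the sum of its distances to the other models and keeps the one with the
# lowest score, so the outlier below is never selected.
if __name__ == "__main__":
    _models = [{"w": torch.tensor([1.0, 1.0])}, {"w": torch.tensor([1.5, 1.5])}, {"w": torch.tensor([10.0, 10.0])}]
    _scores = [sum(float(numpy.linalg.norm(m["w"] - other["w"])) for other in _models) for m in _models]
    print(_scores.index(min(_scores)))  # expected: 1 (the model closest to its peers)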
| 1,512 | Python | .py | 40 | 26.55 | 98 | 0.535568 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,958 | blockchainReputation.py | enriquetomasmb_nebula/nebula/core/aggregation/blockchainReputation.py | import time
from functools import wraps
import requests
import torch
from typing import Dict, List, Tuple, OrderedDict, Mapping
from eth_account import Account
from web3 import Web3
from web3.middleware import construct_sign_and_send_raw_middleware
from web3.middleware import geth_poa_middleware
from tabulate import tabulate
from retry import retry
from nebula.core.aggregation.aggregator import Aggregator
from nebula.core.utils.helper import cosine_metric, euclidean_metric, minkowski_metric, manhattan_metric, \
pearson_correlation_metric, jaccard_metric
def cossim_euclidean(model1, model2, similarity):
return cosine_metric(model1, model2, similarity=similarity) * euclidean_metric(model1, model2, similarity=similarity)
class BlockchainReputation(Aggregator):
"""
# BAT-SandrinHunkeler (BlockchainReputation)
    Weighted FedAvg using the relative reputation of each model's trainer
Returns: aggregated model
"""
ALGORITHM_MAP = {
"Cossim": cosine_metric,
"Pearson": pearson_correlation_metric,
"Euclidean": euclidean_metric,
"Minkowski": minkowski_metric,
"Manhattan": manhattan_metric,
"Jaccard": jaccard_metric,
"CossimEuclid": cossim_euclidean
}
def __init__(self, similarity_metric: str = "CossimEuclid", config=None, **kwargs):
# initialize parent class
super().__init__(config, **kwargs)
self.config = config
# extract local NEBULA name
self.node_name = self.config.participant["network_args"]["addr"]
# initialize BlockchainHandler for interacting with oracle and non-validator node
self.__blockchain = BlockchainHandler(self.node_name)
# check if node is malicious for debugging
self.__malicious = self.config.participant["device_args"]["malicious"]
self.__opinion_algo = BlockchainReputation.ALGORITHM_MAP.get(similarity_metric)
self.__similarity_metric = similarity_metric
def run_aggregation(self, model_buffer: OrderedDict[str, OrderedDict[torch.Tensor, int]]) -> torch.Tensor:
print_with_frame("BLOCKCHAIN AGGREGATION: START")
# track aggregation time for experiments
start = time.time_ns()
# verify the registration process during initialization of the BlockchainHandler
self.__blockchain.verify_registration()
# verify if ether balance is still sufficient for aggregating, request more otherwise
self.__blockchain.verify_balance()
# create dict<sender, model>
current_models = {sender: model for sender, (model, weight) in model_buffer.items()}
print(f"Node: {self.node_name}", flush=True)
print(f"self.__malicious: {self.__malicious}", flush=True)
# extract local model from models to aggregate
local_model = model_buffer[self.node_name][0]
# compute similarity between local model and all buffered models
metric_values = {
sender: max(min(round(self.__opinion_algo(local_model, current_models[sender], similarity=True), 5), 1), 0)
for sender in current_models.keys() if sender != self.node_name}
# log similarity metric values
print_table("SIMILARITY METRIC", list(metric_values.items()),
["neighbour Node", f"{self.__similarity_metric} Similarity"])
# increase resolution of metric in upper half of interval
opinion_values = {sender: round(metric ** 3 * 100) for sender, metric in metric_values.items()}
# DEBUG
if int(self.node_name[-7]) <= 1 and self.__blockchain.round >= 5:
opinion_values = {sender: int(torch.randint(0, 101, (1,))[0]) for sender, metric in metric_values.items()}
# push local opinions to reputation system
self.__blockchain.push_opinions(opinion_values)
# log pushed opinion values
print_table("REPORT LOCAL OPINION", list(opinion_values.items()),
["Node", f"Transformed {self.__similarity_metric} Similarity"])
# request global reputation values from reputation system
reputation_values = self.__blockchain.get_reputations([sender for sender in current_models.keys()])
# log received global reputations
print_table("GLOBAL REPUTATION", list(reputation_values.items()), ["Node", "Global Reputation"])
# normalize all reputation values to sum() == 1
sum_reputations = sum(reputation_values.values())
if sum_reputations > 0:
normalized_reputation_values = {name: round(reputation_values[name] / sum_reputations, 3) for name in
reputation_values}
else:
normalized_reputation_values = reputation_values
# log normalized aggregation weights
print_table("AGGREGATION WEIGHTS", list(normalized_reputation_values.items()),
["Node", "Aggregation Weight"])
# initialize empty model
final_model = {layer: torch.zeros_like(param).float() for layer, param in local_model.items()}
# cover rare case where no models were added or reputation is 0 to return the local model
if sum_reputations > 0:
for sender in normalized_reputation_values.keys():
for layer in final_model:
final_model[layer] += current_models[sender][layer].float() * normalized_reputation_values[sender]
# otherwise, skip aggregation
else:
final_model = local_model
# report used gas to oracle and log cumulative gas used
print_table("TOTAL GAS USED", self.__blockchain.report_gas_oracle(), ["Node", "Cumulative Gas used"])
self.__blockchain.report_time_oracle(start)
print_with_frame("BLOCKCHAIN AGGREGATION: FINISHED")
# return newly aggregated model
return final_model
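# Illustrative sketch (hypothetical node names, not from the original file): the cubic
# opinion transform used in run_aggregation above spreads out high similarity scores
# while compressing low ones before they are pushed to the reputation system.
if __name__ == "__main__":
    _metrics = {"node-a": 0.95, "node-b": 0.80, "node-c": 0.40}
    print({sender: round(metric ** 3 * 100) for sender, metric in _metrics.items()})  # {'node-a': 86, 'node-b': 51, 'node-c': 6}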
def print_table(title: str, values: list[Tuple | List], headers: list[str]) -> None:
"""
Prints a title, all values ordered in a table, with the headers as column titles.
Args:
title: Title of the table
values: Rows of table
headers: Column headers of table
Returns: None, prints output
"""
print(f"\n{'-' * 25} {title.upper()} {'-' * 25}", flush=True)
print(
tabulate(
sorted(values),
headers=headers,
tablefmt="grid"
),
flush=True
)
def print_with_frame(message) -> None:
"""
Prints a large frame with a title inside
Args:
message: Title to put into the frame
Returns: None
"""
message_length = len(message)
print(f"{' ' * 20}+{'-' * (message_length + 2)}+", flush=True)
print(f"{'*' * 20}| {message.upper()} |{'*' * 20}", flush=True)
print(f"{' ' * 20}+{'-' * (message_length + 2)}+", flush=True)
class BlockchainHandler:
"""
Handles interaction with Oracle and Non-Validator Node of Blockchain Network
"""
# static ip address of non-validator node with RPC-API
__rpc_url = "http://172.25.0.104:8545"
# static ip address of oracle with REST-API
__oracle_url = "http://172.25.0.105:8081"
# default REST header for interacting with oracle
__rest_header = {
'Content-type': 'application/json',
'Accept': 'application/json'
}
def __init__(self, home_address):
print_with_frame("BLOCKCHAIN INITIALIZATION: START")
# local NEBULA name, needed for reputation system
self.__home_ip = home_address
# randomly generated private key, needed to sign transaction
self.__private_key = str()
# public wallet address generated from the private key
self.__acc_address = str()
# variables for experiment, not required for aggregation
self.__gas_used = int()
self.__gas_price = float(27.3)
self.round = int(1)
        # generate randomized private key
self.__acc = self.__create_account()
# configure web3 objects for using Proof-of-Authority
self.__web3 = self.__initialize_web3()
# call Oracle to sense if blockchain is ready
print(f"{'-' * 25} CONNECT TO ORACLE {'-' * 25}", flush=True)
self.__wait_for_blockchain()
# request ETH funds for creating transactions, paying gas
self.__request_funds_from_oracle()
# check if funds were assigned by checking directly with blockchain
self.verify_balance()
# request contract address and header from Oracle
self.__contract_obj = self.__get_contract_from_oracle()
# register public wallet address at reputation system
print(f"{'-' * 25} CONNECT TO REPUTATION SYSTEM {'-' * 25}", flush=True)
self.__register()
print(f"BLOCKCHAIN: Registered to reputation system", flush=True)
# check if registration was successful
self.verify_registration()
print(f"BLOCKCHAIN: Verified registration", flush=True)
print_with_frame("BLOCKCHAIN INITIALIZATION: FINISHED")
@classmethod
@property
def oracle_url(cls) -> str:
return cls.__oracle_url
@classmethod
@property
def rest_header(cls) -> Mapping[str, str]:
return cls.__rest_header
def __create_account(self):
"""
        Generates a randomized private key and derives the public account from it
        Returns: the newly created account object
"""
print(f"{'-' * 25} REGISTER WORKING NODE {'-' * 25}", flush=True)
# generate random private key, address, public address
acc = Account.create()
# initialize web3 utility object
web3 = Web3()
# convert private key to hex, used in raw transactions
self.__private_key = web3.to_hex(acc.key)
# convert address type, used in raw transactions
self.__acc_address = web3.to_checksum_address(acc.address)
print(f"WORKER NODE: Registered account: {self.__home_ip}", flush=True)
print(f"WORKER NODE: Account address: {self.__acc_address}", flush=True)
# return generated account
return acc
def __initialize_web3(self):
"""
Initializes Web3 object and configures it for PoA protocol
Returns: Web3 object
"""
# initialize Web3 object with ip of non-validator node
web3 = Web3(Web3.HTTPProvider(self.__rpc_url, request_kwargs={'timeout': 20})) # 10
# inject Proof-of-Authority settings to object
web3.middleware_onion.inject(geth_poa_middleware, layer=0)
# automatically sign transactions if available for execution
web3.middleware_onion.add(construct_sign_and_send_raw_middleware(self.__acc))
# inject local account as default
web3.eth.default_account = self.__acc_address
# return initialized object for executing transaction
return web3
@retry((Exception, requests.exceptions.HTTPError), tries=20, delay=4)
def __wait_for_blockchain(self) -> None:
"""
Request state of blockchain from Oracle by periodic calls and sleep
Returns: None
"""
# check with oracle if blockchain is ready for requests
response = requests.get(
url=f"{self.__oracle_url}/status",
headers=self.__rest_header,
timeout=20 # 10
)
# raise Exception if status is not successful
response.raise_for_status()
return print(f"ORACLE: Blockchain is ready", flush=True)
@retry((Exception, requests.exceptions.HTTPError), tries=3, delay=4)
def __request_funds_from_oracle(self) -> None:
"""
Requests funds from Oracle by sending public address
Returns: None
"""
# call oracle's faucet by Http post request
response = requests.post(
url=f"{self.__oracle_url}/faucet",
json={f"address": self.__acc_address},
headers=self.__rest_header,
timeout=20 # 10
)
# raise Exception if status is not successful
response.raise_for_status()
return print(f"ORACLE: Received 500 ETH", flush=True)
def verify_balance(self) -> None:
"""
Calls blockchain directly for requesting current balance
Returns: None
"""
# directly call view method from non-validator node
balance = self.__web3.eth.get_balance(self.__acc_address, "latest")
# convert wei to ether
balance_eth = self.__web3.from_wei(balance, "ether")
print(f"BLOCKCHAIN: Successfully verified balance of {balance_eth} ETH", flush=True)
# if node ran out of funds, it requests ether from the oracle
if balance_eth <= 1:
self.__request_funds_from_oracle()
return None
@retry((Exception, requests.exceptions.HTTPError), tries=3, delay=4)
def __get_contract_from_oracle(self):
"""
Requests header file and contract address, generates Web3 Contract object with it
Returns: Web3 Contract object
"""
response = requests.get(
url=f"{self.__oracle_url}/contract",
headers=self.__rest_header,
timeout=20 # 10
)
# raise Exception if status is not successful
response.raise_for_status()
# convert response to json to extract the abi and address
json_response = response.json()
print(f"ORACLE: Initialized chain code: {json_response.get('address')}", flush=True)
# return an initialized web3 contract object
return self.__web3.eth.contract(
abi=json_response.get("abi"),
address=json_response.get("address")
)
@retry((Exception, requests.exceptions.HTTPError), tries=3, delay=4)
def report_gas_oracle(self) -> list:
"""
Reports accumulated gas costs of all transactions made to the blockchain
Returns: List of all accumulated gas costs per registered node
"""
# method used for experiments, not needed for aggregation
response = requests.post(
url=f"{self.__oracle_url}/gas",
json={"amount": self.__gas_used, "round": self.round},
headers=self.__rest_header,
timeout=20 # 10
)
# raise Exception if status is not successful
response.raise_for_status()
# reset local gas accumulation
self.__gas_used = 0
# return list with gas usage for logging
return list(response.json().items())
@retry((Exception, requests.exceptions.HTTPError), tries=3, delay=4)
def report_reputation_oracle(self, records: list) -> None:
"""
Reports reputations used for aggregation
Returns: None
"""
# method used for experiments, not needed for aggregation
response = requests.post(
url=f"{self.__oracle_url}/reputation",
json={"records": records, "round": self.round, "sender":self.__home_ip},
headers=self.__rest_header,
timeout=20 # 10
)
# raise Exception if status is not successful
response.raise_for_status()
return None
def __sign_and_deploy(self, trx_hash):
"""
        Signs a function call to the chain code with the private key and awaits the receipt
Args:
trx_hash: Transformed dictionary of all properties relevant for call to chain code
Returns: transaction receipt confirming the successful write to the ledger
"""
# transaction is signed with private key
signed_transaction = self.__web3.eth.account.sign_transaction(trx_hash, private_key=self.__private_key)
# confirmation that transaction was passed from non-validator node to validator nodes
executed_transaction = self.__web3.eth.send_raw_transaction(signed_transaction.rawTransaction)
        # the non-validator node awaits successful validation by the validator nodes and returns the receipt
transaction_receipt = self.__web3.eth.wait_for_transaction_receipt(executed_transaction)
# accumulate used gas
self.__gas_used += transaction_receipt.gasUsed
return transaction_receipt
@retry(Exception, tries=3, delay=4)
def push_opinions(self, opinion_dict: dict):
"""
Pushes all locally computed opinions of models to aggregate to the reputation system
Args:
opinion_dict: Dict of all names:opinions for writing to the reputation system
Returns: Json of transaction receipt
"""
        # create raw transaction object to call rate_neighbours() from the reputation system
unsigned_trx = self.__contract_obj.functions.rate_neighbours(
list(opinion_dict.items())).build_transaction(
{
"chainId": self.__web3.eth.chain_id,
"from": self.__acc_address,
"nonce": self.__web3.eth.get_transaction_count(
self.__web3.to_checksum_address(self.__acc_address),
'pending'
),
"gasPrice": self.__web3.to_wei(self.__gas_price, "gwei")
}
)
# sign and execute the transaction
conf = self.__sign_and_deploy(unsigned_trx)
self.report_reputation_oracle(list(opinion_dict.items()))
# return the receipt as json
return self.__web3.to_json(conf)
@retry(Exception, tries=3, delay=4)
def get_reputations(self, ip_addresses: list) -> dict:
"""
        Requests globally aggregated opinion values from the reputation system for computing aggregation weights
Args:
ip_addresses: Names of nodes of which the reputation values should be generated
Returns: Dictionary of name:reputation from the reputation system
"""
final_reputations = dict()
stats_to_print = list()
# call get_reputations() from reputation system
raw_reputation = self.__contract_obj.functions.get_reputations(ip_addresses).call(
{"from": self.__acc_address})
# loop list with tuples from reputation system
for name, reputation, weighted_reputation, stddev_count, divisor, final_reputation, avg, median, stddev, index, avg_deviation, avg_avg_deviation, malicious_opinions in raw_reputation:
# list elements with an empty name can be ignored
if not name: continue
# print statistical values
stats_to_print.append(
[name, reputation / 10, weighted_reputation / 10, stddev_count / 10, divisor / 10, final_reputation / 10, avg / 10, median / 10,
stddev / 10, avg_deviation / 10, avg_avg_deviation / 10, malicious_opinions])
# assign the final reputation to a dict for later aggregation
final_reputations[name] = final_reputation / 10
print_table("REPUTATION SYSTEM STATE", stats_to_print,
["Name", "Reputation", "Weighted Rep. by local Node", "Stddev Count", "Divisor", "Final Reputation", "Mean", "Median",
"Stddev", "Avg Deviation in Opinion", "Avg of all Avg Deviations in Opinions", "Malicious Opinions"])
# if sum(final_reputations.values()):
# self.report_reputation_oracle(list(final_reputations.items()))
return final_reputations
@retry(Exception, tries=3, delay=4)
def __register(self) -> str:
"""
Registers a node's name with its public address, signed with private key
Returns: Json of transaction receipt
"""
# build raw transaction object to call public method register() from reputation system
unsigned_trx = self.__contract_obj.functions.register(self.__home_ip).build_transaction(
{
"chainId": self.__web3.eth.chain_id,
"from": self.__acc_address,
"nonce": self.__web3.eth.get_transaction_count(
self.__web3.to_checksum_address(self.__acc_address),
'pending'
),
"gasPrice": self.__web3.to_wei(self.__gas_price, "gwei")
}
)
# sign and execute created transaction
conf = self.__sign_and_deploy(unsigned_trx)
# return the receipt as json
return self.__web3.to_json(conf)
@retry(Exception, tries=3, delay=4)
def verify_registration(self) -> None:
"""
Verifies the successful registration of the node itself,
executes registration again if reputation system returns false
Returns: None
"""
# call view function of reputation system to check if registration was not abandoned by hard fork
confirmation = self.__contract_obj.functions.confirm_registration().call({"from": self.__acc_address})
# function returns boolean
if not confirmation:
# register again if not successful
self.__register()
# raise Exception to check again
raise Exception(f"EXCEPTION: _verify_registration() => Could not be confirmed)")
return None
@retry((Exception, requests.exceptions.HTTPError), tries=3, delay=4)
def report_time_oracle(self, start: float) -> None:
"""
Reports time used for aggregation
Returns: None
"""
# method used for experiments, not needed for aggregation
# report aggregation time and round to oracle
response = requests.post(
url=f"{BlockchainHandler.oracle_url}/time",
json={"time": (time.time_ns() - start) / (10 ** 9), "round": self.round},
headers=self.__rest_header,
timeout=20 # 10
)
# raise Exception if status is not successful
response.raise_for_status()
# increase aggregation round counter after reporting time
self.round += 1
return None | 22,013 | Python | .py | 449 | 39.536748 | 191 | 0.643464 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,959 | trimmedmean.py | enriquetomasmb_nebula/nebula/core/aggregation/trimmedmean.py | import logging
import torch
import numpy as np
from nebula.core.aggregation.aggregator import Aggregator
class TrimmedMean(Aggregator):
"""
Aggregator: TrimmedMean
    Authors: Dong Yin et al.
    Year: 2018
Note: https://arxiv.org/pdf/1803.01498.pdf
"""
def __init__(self, config=None, beta=0, **kwargs):
super().__init__(config, **kwargs)
self.beta = beta
def get_trimmedmean(self, weights):
        # if there are not enough values to trim beta elements from each end, fall back to a plain mean
        weight_len = len(weights)
        if weight_len <= 2 * self.beta:
            remaining_weights = weights
            res = torch.mean(remaining_weights, 0)
else:
# remove the largest and smallest β items
arr_weights = np.asarray(weights)
nobs = arr_weights.shape[0]
start = self.beta
end = nobs - self.beta
atmp = np.partition(arr_weights, (start, end - 1), 0)
sl = [slice(None)] * atmp.ndim
sl[0] = slice(start, end)
arr_median = np.mean(atmp[tuple(sl)], axis=0)
res = torch.tensor(arr_median)
# get the mean of the remaining weights
return res
def run_aggregation(self, models):
super().run_aggregation(models)
models = list(models.values())
models_params = [m for m, _ in models]
total_models = len(models)
accum = {layer: torch.zeros_like(param).float() for layer, param in models[-1][0].items()}
for layer in accum:
weight_layer = accum[layer]
# get the shape of layer tensor
l_shape = list(weight_layer.shape)
# get the number of elements of layer tensor
number_layer_weights = torch.numel(weight_layer)
# if its 0-d tensor
if l_shape == []:
weights = torch.tensor([models_params[j][layer] for j in range(0, total_models)])
weights = weights.double()
w = self.get_trimmedmean(weights)
accum[layer] = w
else:
# flatten the tensor
weight_layer_flatten = weight_layer.view(number_layer_weights)
# flatten the tensor of each model
models_layer_weight_flatten = torch.stack(
[models_params[j][layer].view(number_layer_weights) for j in range(0, total_models)],
0,
)
# get the weight list [w1j,w2j,··· ,wmj], where wij is the jth parameter of the ith local model
trimmedmean = self.get_trimmedmean(models_layer_weight_flatten)
accum[layer] = trimmedmean.view(l_shape)
return accum
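# Illustrative sketch (hypothetical toy data, not from the original file): with beta=1
# the trimmed mean drops the single largest and smallest value per coordinate before
# averaging, so an extreme value such as 100.0 no longer dominates the aggregate.
if __name__ == "__main__":
    _values = np.array([1.0, 2.0, 3.0, 4.0, 100.0])
    print(np.mean(np.sort(_values)[1:-1]))  # (2 + 3 + 4) / 3 = 3.0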
| 2,773 | Python | .py | 64 | 31.984375 | 111 | 0.575735 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,960 | dualhistagg.py | enriquetomasmb_nebula/nebula/core/aggregation/dualhistagg.py | import logging
import torch
import torch.nn.functional as F
import numpy as np
from nebula.core.aggregation.aggregator import Aggregator
from nebula.core.utils.helper import cosine_metric
class DualHistAgg(Aggregator):
"""
Aggregator: Dual History Aggregation (DualHistAgg)
Authors: Enrique et al.
Year: 2024
"""
def __init__(self, config=None, **kwargs):
super().__init__(config, **kwargs)
def softmax(self, x):
# Safeguard against empty input array
if x.size == 0:
return np.array([])
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0) # ensure division is done correctly
def run_aggregation(self, models, reference_model=None):
if len(models) == 0:
logging.error("Trying to aggregate models when there are no models")
return None, None
models = list(models.values())
num_models = len(models)
logging.info(f"Number of models: {num_models}")
if num_models == 1:
logging.info("Only one model, returning it")
return models[0][0], models[0][0]
# Total Samples
total_samples = float(sum(w for _, w in models))
# Create a Zero Model
accum = {layer: torch.zeros_like(param).float() for layer, param in models[0][0].items()} # use first model for template
        # use independent zero tensors; a shallow dict copy would share tensors with accum and the in-place additions below would mix both aggregations
        accum_similarity = {layer: torch.zeros_like(param).float() for layer, param in models[0][0].items()}
similarities = [cosine_metric(model, reference_model) for model, _ in models] if reference_model else [1] * num_models
logging.info(f"Similarities: {similarities}")
weights = self.softmax(np.array(similarities))
logging.info(f"Weights: {weights}")
# Aggregation process
for (model, _), weight, sim_weight in zip(models, weights, similarities):
for layer in accum:
accum[layer] += model[layer].float() * float(weight)
accum_similarity[layer] += model[layer].float() * float(sim_weight)
# Normalize aggregated models
for layer in accum:
accum[layer] /= total_samples
accum_similarity[layer] /= total_samples
return accum, accum_similarity
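# Illustrative sketch (hypothetical toy data, not from the original file): the softmax
# above turns raw similarity scores into aggregation weights that sum to one, so more
# similar models receive proportionally more weight.
if __name__ == "__main__":
    _sims = np.array([0.9, 0.5])
    _e = np.exp(_sims - np.max(_sims))
    print(_e / _e.sum())  # approximately [0.599 0.401]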
| 2,192 | Python | .py | 49 | 36.285714 | 129 | 0.633333 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,961 | fedavgSVM.py | enriquetomasmb_nebula/nebula/core/aggregation/fedavgSVM.py | import numpy as np
from nebula.core.aggregation.aggregator import Aggregator
from sklearn.svm import LinearSVC
class FedAvgSVM(Aggregator):
"""
Aggregator: Federated Averaging (FedAvg)
Authors: McMahan et al.
Year: 2016
Note: This is a modified version of FedAvg for SVMs.
"""
def __init__(self, config=None, **kwargs):
super().__init__(config, **kwargs)
def run_aggregation(self, models):
super().run_aggregation(models)
models = list(models.values())
total_samples = sum([y for _, y in models])
coeff_accum = np.zeros_like(models[-1][0].coef_)
intercept_accum = 0.0
for model, w in models:
if not isinstance(model, LinearSVC):
return None
coeff_accum += model.coef_ * w
intercept_accum += model.intercept_ * w
coeff_accum /= total_samples
intercept_accum /= total_samples
aggregated_svm = LinearSVC()
aggregated_svm.coef_ = coeff_accum
aggregated_svm.intercept_ = intercept_accum
return aggregated_svm
| 1,103 | Python | .py | 29 | 30.172414 | 57 | 0.635936 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,962 | median.py | enriquetomasmb_nebula/nebula/core/aggregation/median.py | import torch
import numpy as np
from nebula.core.aggregation.aggregator import Aggregator
class Median(Aggregator):
"""
Aggregator: Median
    Authors: Dong Yin et al.
    Year: 2018
Note: https://arxiv.org/pdf/1803.01498.pdf
"""
def __init__(self, config=None, **kwargs):
super().__init__(config, **kwargs)
def get_median(self, weights):
        # an odd number of values has a single middle element; an even number averages the two middle elements
weight_len = len(weights)
median = 0
if weight_len % 2 == 1:
# odd number, return the median
median, _ = torch.median(weights, 0)
else:
# even number, return the mean of median two numbers
# sort the tensor
arr_weights = np.asarray(weights)
nobs = arr_weights.shape[0]
start = int(nobs / 2) - 1
end = int(nobs / 2) + 1
atmp = np.partition(arr_weights, (start, end - 1), 0)
sl = [slice(None)] * atmp.ndim
sl[0] = slice(start, end)
arr_median = np.mean(atmp[tuple(sl)], axis=0)
median = torch.tensor(arr_median)
return median
def run_aggregation(self, models):
super().run_aggregation(models)
models = list(models.values())
models_params = [m for m, _ in models]
total_models = len(models)
accum = {layer: torch.zeros_like(param).float() for layer, param in models[-1][0].items()}
        # Calculate the median for each parameter
for layer in accum:
weight_layer = accum[layer]
# get the shape of layer tensor
l_shape = list(weight_layer.shape)
# get the number of elements of layer tensor
number_layer_weights = torch.numel(weight_layer)
# if its 0-d tensor
if l_shape == []:
weights = torch.tensor([models_params[j][layer] for j in range(0, total_models)])
weights = weights.double()
w = self.get_median(weights)
accum[layer] = w
else:
# flatten the tensor
weight_layer_flatten = weight_layer.view(number_layer_weights)
# flatten the tensor of each model
models_layer_weight_flatten = torch.stack(
[models_params[j][layer].view(number_layer_weights) for j in range(0, total_models)],
0,
)
# get the weight list [w1j,w2j,··· ,wmj], where wij is the jth parameter of the ith local model
median = self.get_median(models_layer_weight_flatten)
accum[layer] = median.view(l_shape)
return accum
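# Illustrative sketch (hypothetical toy data, not from the original file): with an even
# number of models, the coordinate-wise median above averages the two middle values,
# which discards the extreme 100.0 a potential outlier might report.
if __name__ == "__main__":
    _sorted, _ = torch.sort(torch.tensor([1.0, 2.0, 3.0, 100.0]), 0)
    print((_sorted[1] + _sorted[2]) / 2)  # tensor(2.5000)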
| 2,726 | Python | .py | 63 | 31.84127 | 111 | 0.565382 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,963 | nebulamodel.py | enriquetomasmb_nebula/nebula/core/models/nebulamodel.py | from abc import ABC, abstractmethod
import logging
import torch
from nebula.addons.functions import print_msg_box
import lightning as pl
from torchmetrics.classification import (
MulticlassAccuracy,
MulticlassRecall,
MulticlassPrecision,
MulticlassF1Score,
MulticlassConfusionMatrix,
)
from torchmetrics import MetricCollection
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
matplotlib.use("Agg")
plt.switch_backend("Agg")
from nebula.config.config import TRAINING_LOGGER
logging_training = logging.getLogger(TRAINING_LOGGER)
class NebulaModel(pl.LightningModule, ABC):
"""
Abstract class for the NEBULA model.
    It defines the common interface and metric handling that all NEBULA models implement.
"""
def process_metrics(self, phase, y_pred, y, loss=None):
"""
Calculate and log metrics for the given phase.
The metrics are calculated in each batch.
Args:
phase (str): One of 'Train', 'Validation', or 'Test'
y_pred (torch.Tensor): Model predictions
y (torch.Tensor): Ground truth labels
loss (torch.Tensor, optional): Loss value
"""
y_pred_classes = torch.argmax(y_pred, dim=1).detach()
y = y.detach()
if phase == "Train":
self.logger.log_data({f"{phase}/Loss": loss.detach()})
self.train_metrics.update(y_pred_classes, y)
elif phase == "Validation":
self.val_metrics.update(y_pred_classes, y)
elif phase == "Test (Local)":
self.test_metrics.update(y_pred_classes, y)
self.cm.update(y_pred_classes, y) if self.cm is not None else None
elif phase == "Test (Global)":
self.test_metrics_global.update(y_pred_classes, y)
self.cm_global.update(y_pred_classes, y) if self.cm_global is not None else None
else:
raise NotImplementedError
del y_pred_classes, y
def log_metrics_end(self, phase):
"""
Log metrics for the given phase.
Args:
phase (str): One of 'Train', 'Validation', 'Test (Local)', or 'Test (Global)'
"""
if phase == "Train":
output = self.train_metrics.compute()
elif phase == "Validation":
output = self.val_metrics.compute()
elif phase == "Test (Local)":
output = self.test_metrics.compute()
elif phase == "Test (Global)":
output = self.test_metrics_global.compute()
else:
raise NotImplementedError
output = {f"{phase}/{key.replace('Multiclass', '').split('/')[-1]}": value.detach() for key, value in output.items()}
self.logger.log_data(output, step=self.global_number[phase])
metrics_str = ""
for key, value in output.items():
metrics_str += f"{key}: {value:.4f}\n"
print_msg_box(metrics_str, indent=2, title=f"{phase} Metrics | Epoch: {self.global_number[phase]} | Round: {self.round}", logger_name=TRAINING_LOGGER)
def generate_confusion_matrix(self, phase, print_cm=False, plot_cm=False):
"""
Generate and plot the confusion matrix for the given phase.
Args:
phase (str): One of 'Train', 'Validation', 'Test (Local)', or 'Test (Global)'
"""
if phase == "Test (Local)":
if self.cm is None:
raise ValueError(f"Confusion matrix not available for {phase} phase.")
cm = self.cm.compute().cpu()
elif phase == "Test (Global)":
if self.cm_global is None:
raise ValueError(f"Confusion matrix not available for {phase} phase.")
cm = self.cm_global.compute().cpu()
else:
raise NotImplementedError
if print_cm:
logging_training.info(f"{phase} / Confusion Matrix:\n{cm}")
if plot_cm:
cm_numpy = cm.numpy().astype(int)
classes = [i for i in range(self.num_classes)]
fig, ax = plt.subplots(figsize=(12, 12))
sns.heatmap(cm_numpy, annot=False, fmt="", cmap="Blues", ax=ax, xticklabels=classes, yticklabels=classes, square=True)
ax.set_xlabel("Predicted labels", fontsize=12)
ax.set_ylabel("True labels", fontsize=12)
ax.set_title(f"{phase} Confusion Matrix", fontsize=16)
plt.xticks(rotation=90, fontsize=6)
plt.yticks(rotation=0, fontsize=6)
plt.tight_layout()
self.logger.log_figure(fig, step=self.round, name=f"{phase}/CM")
plt.close()
del cm_numpy, classes, fig, ax
            # Reset the confusion matrix
if phase == "Test (Local)":
self.cm.reset()
else:
self.cm_global.reset()
del cm
def __init__(
self,
input_channels=1,
num_classes=10,
learning_rate=1e-3,
metrics=None,
confusion_matrix=None,
seed=None,
):
super().__init__()
self.input_channels = input_channels
self.num_classes = num_classes
self.learning_rate = learning_rate
if metrics is None:
metrics = MetricCollection(
[
MulticlassAccuracy(num_classes=num_classes),
MulticlassPrecision(num_classes=num_classes),
MulticlassRecall(num_classes=num_classes),
MulticlassF1Score(num_classes=num_classes),
]
)
self.train_metrics = metrics.clone(prefix="Train/")
self.val_metrics = metrics.clone(prefix="Validation/")
self.test_metrics = metrics.clone(prefix="Test (Local)/")
self.test_metrics_global = metrics.clone(prefix="Test (Global)/")
del metrics
if confusion_matrix is None:
self.cm = MulticlassConfusionMatrix(num_classes=num_classes)
self.cm_global = MulticlassConfusionMatrix(num_classes=num_classes)
if seed is not None:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# Round counter (number of training-validation-test rounds)
self.round = 0
# Epochs counter
self.global_number = {"Train": 0, "Validation": 0, "Test (Local)": 0, "Test (Global)": 0}
# Communication manager for sending messages from the model (e.g., prototypes, gradients)
# Model parameters are sent by default using network.propagator
self.communication_manager = None
def set_communication_manager(self, communication_manager):
self.communication_manager = communication_manager
def get_communication_manager(self):
if self.communication_manager is None:
raise ValueError("Communication manager not set.")
return self.communication_manager
@abstractmethod
def forward(self, x):
"""Forward pass of the model."""
pass
@abstractmethod
def configure_optimizers(self):
"""Optimizer configuration."""
pass
def step(self, batch, batch_idx, phase):
"""Training/validation/test step."""
x, y = batch
y_pred = self.forward(x)
loss = self.criterion(y_pred, y)
self.process_metrics(phase, y_pred, y, loss)
return loss
def training_step(self, batch, batch_idx):
"""
Training step for the model.
Args:
batch:
            batch_idx:
Returns:
"""
return self.step(batch, batch_idx=batch_idx, phase="Train")
def on_train_start(self):
logging_training.info(f"{'='*10} [Training] Started {'='*10}")
def on_train_end(self):
logging_training.info(f"{'='*10} [Training] Done {'='*10}")
def on_train_epoch_end(self):
self.log_metrics_end("Train")
self.train_metrics.reset()
self.global_number["Train"] += 1
def validation_step(self, batch, batch_idx):
"""
Validation step for the model.
Args:
batch:
batch_idx:
Returns:
"""
return self.step(batch, batch_idx=batch_idx, phase="Validation")
def on_validation_end(self):
pass
def on_validation_epoch_end(self):
# In general, the validation phase is done in one epoch
self.log_metrics_end("Validation")
self.val_metrics.reset()
self.global_number["Validation"] += 1
def test_step(self, batch, batch_idx, dataloader_idx=None):
"""
Test step for the model.
Args:
batch:
batch_idx:
Returns:
"""
if dataloader_idx == 0:
return self.step(batch, batch_idx=batch_idx, phase="Test (Local)")
else:
return self.step(batch, batch_idx=batch_idx, phase="Test (Global)")
def on_test_start(self):
logging_training.info(f"{'='*10} [Testing] Started {'='*10}")
def on_test_end(self):
logging_training.info(f"{'='*10} [Testing] Done {'='*10}")
def on_test_epoch_end(self):
# In general, the test phase is done in one epoch
self.log_metrics_end("Test (Local)")
self.log_metrics_end("Test (Global)")
self.generate_confusion_matrix("Test (Local)", print_cm=True, plot_cm=True)
self.generate_confusion_matrix("Test (Global)", print_cm=True, plot_cm=True)
self.test_metrics.reset()
self.test_metrics_global.reset()
self.global_number["Test (Local)"] += 1
self.global_number["Test (Global)"] += 1
def on_round_end(self):
self.round += 1
class NebulaModelStandalone(NebulaModel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Log metrics per epoch
def on_train_end(self):
pass
def on_train_epoch_end(self):
self.log_metrics_end("Train")
self.train_metrics.reset()
# NebulaModel registers training rounds
# NebulaModelStandalone register the global number of epochs instead of rounds
self.global_number["Train"] += 1
def on_validation_end(self):
pass
def on_validation_epoch_end(self):
self.log_metrics_end("Validation")
self.global_number["Validation"] += 1
self.val_metrics.reset()
def on_test_end(self):
self.global_number["Test (Local)"] += 1
self.global_number["Test (Global)"] += 1
def on_test_epoch_end(self):
self.log_metrics_end("Test (Local)")
self.log_metrics_end("Test (Global)")
self.generate_confusion_matrix("Test (Local)", print_cm=True, plot_cm=True)
self.generate_confusion_matrix("Test (Global)", print_cm=True, plot_cm=True)
self.test_metrics.reset()
self.test_metrics_global.reset()
| 10,929 | Python | .py | 263 | 32.307985 | 158 | 0.609535 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,964 | cnn.py | enriquetomasmb_nebula/nebula/core/models/mnist/cnn.py | import torch
from nebula.core.models.nebulamodel import NebulaModel
class MNISTModelCNN(NebulaModel):
def __init__(
self,
input_channels=1,
num_classes=10,
learning_rate=1e-3,
metrics=None,
confusion_matrix=None,
seed=None,
):
super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
self.example_input_array = torch.zeros(1, 1, 28, 28)
self.learning_rate = learning_rate
self.criterion = torch.nn.CrossEntropyLoss()
self.conv1 = torch.nn.Conv2d(
in_channels=input_channels,
out_channels=32,
kernel_size=(5, 5),
padding="same",
)
self.relu = torch.nn.ReLU()
self.pool1 = torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2)
self.conv2 = torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(5, 5), padding="same")
self.pool2 = torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2)
self.l1 = torch.nn.Linear(7 * 7 * 64, 2048)
self.l2 = torch.nn.Linear(2048, num_classes)
def forward(self, x):
input_layer = x.view(-1, 1, 28, 28)
conv1 = self.relu(self.conv1(input_layer))
pool1 = self.pool1(conv1)
conv2 = self.relu(self.conv2(pool1))
pool2 = self.pool2(conv2)
pool2_flat = pool2.reshape(-1, 7 * 7 * 64)
dense = self.relu(self.l1(pool2_flat))
logits = self.l2(dense)
return logits
def configure_optimizers(self):
optimizer = torch.optim.Adam(
self.parameters(),
lr=self.learning_rate,
betas=(self.config["beta1"], self.config["beta2"]),
amsgrad=self.config["amsgrad"],
)
return optimizer
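# Illustrative usage sketch (hypothetical, not from the original file): a quick shape
# check of the model on a random MNIST-like batch.
if __name__ == "__main__":
    _model = MNISTModelCNN()
    print(_model(torch.rand(4, 1, 28, 28)).shape)  # expected: torch.Size([4, 10])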
| 1,868 | Python | .py | 47 | 30.765957 | 105 | 0.597023 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,965 | mlp.py | enriquetomasmb_nebula/nebula/core/models/mnist/mlp.py | import torch
from nebula.core.models.nebulamodel import NebulaModel
class MNISTModelMLP(NebulaModel):
def __init__(
self,
input_channels=1,
num_classes=10,
learning_rate=1e-3,
metrics=None,
confusion_matrix=None,
seed=None,
):
super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
self.example_input_array = torch.zeros(1, 1, 28, 28)
self.learning_rate = learning_rate
self.criterion = torch.nn.CrossEntropyLoss()
self.l1 = torch.nn.Linear(28 * 28, 256)
self.l2 = torch.nn.Linear(256, 128)
self.l3 = torch.nn.Linear(128, num_classes)
def forward(self, x):
batch_size, channels, width, height = x.size()
x = x.view(batch_size, -1)
x = self.l1(x)
x = torch.relu(x)
x = self.l2(x)
x = torch.relu(x)
x = self.l3(x)
return x
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
return optimizer
| 1,093 | Python | .py | 31 | 27.387097 | 101 | 0.610218 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,966 | rnn.py | enriquetomasmb_nebula/nebula/core/models/sentiment140/rnn.py | import torch
from nebula.core.models.nebulamodel import NebulaModel
class Sentiment140ModelRNN(NebulaModel):
def __init__(
self,
input_channels=3,
num_classes=10,
learning_rate=1e-3,
metrics=None,
confusion_matrix=None,
seed=None,
):
super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
self.example_input_array = torch.zeros(1, 1, 28, 28)
self.learning_rate = learning_rate
self.embedding_dim = 300
self.hidden_dim = 256
self.n_layers = 1
self.bidirectional = True
self.output_dim = num_classes
self.encoder = torch.nn.LSTM(
self.embedding_dim, self.hidden_dim, num_layers=self.n_layers, bidirectional=self.bidirectional, dropout=0.5, batch_first=True
)
self.fc = torch.nn.Linear(self.hidden_dim * 2, self.output_dim)
self.dropout = torch.nn.Dropout(0.5)
self.criterion = torch.nn.CrossEntropyLoss()
self.l1 = torch.nn.Linear(28 * 28, 256)
self.l2 = torch.nn.Linear(256, 128)
self.l3 = torch.nn.Linear(128, num_classes)
self.epoch_global_number = {"Train": 0, "Validation": 0, "Test": 0}
def forward(self, x):
packed_output, (hidden, cell) = self.encoder(x)
hidden = self.dropout(torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1))
out = self.fc(hidden)
out = torch.log_softmax(out, dim=1)
return out
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
return optimizer
| 1,732 | Python | .py | 40 | 34.925 | 138 | 0.625818 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,967 | cnn.py | enriquetomasmb_nebula/nebula/core/models/sentiment140/cnn.py | import torch
from nebula.core.models.nebulamodel import NebulaModel
import math
class Sentiment140ModelCNN(NebulaModel):
def __init__(
self,
input_channels=3,
num_classes=10,
learning_rate=1e-3,
metrics=None,
confusion_matrix=None,
seed=None,
):
super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
self.example_input_array = torch.zeros(1, 1, 28, 28)
self.learning_rate = learning_rate
self.criterion = torch.nn.CrossEntropyLoss()
self.filter_sizes = [2, 3, 4]
self.n_filters = math.ceil(300 * len(self.filter_sizes) / 3)
self.convs = torch.nn.ModuleList([torch.nn.Conv2d(in_channels=1, out_channels=self.n_filters, kernel_size=(fs, 300)) for fs in self.filter_sizes])
self.fc = torch.nn.Linear(len(self.filter_sizes) * self.n_filters, self.num_classes)
self.dropout = torch.nn.Dropout(0.5)
self.epoch_global_number = {"Train": 0, "Validation": 0, "Test": 0}
def forward(self, x):
x = x.unsqueeze(1)
conved = [torch.nn.functional.relu(conv(x)).squeeze(3) for conv in self.convs] # [(batch_size, n_filters, sent_len), ...] * len(filter_sizes)
pooled = [torch.nn.functional.max_pool1d(conv, conv.shape[2]).squeeze(2) for conv in conved] # [(batch_size, n_filters), ...] * len(filter_sizes)
cat = self.dropout(torch.cat(pooled, dim=1))
out = self.fc(cat)
return out
def configure_optimizers(self):
optimizer = torch.optim.Adam(
self.parameters(),
lr=self.learning_rate,
betas=(self.config["beta1"], self.config["beta2"]),
amsgrad=self.config["amsgrad"],
)
return optimizer
| 1,872 | Python | .py | 39 | 39.615385 | 154 | 0.627397 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,968 | mlp.py | enriquetomasmb_nebula/nebula/core/models/syscall/mlp.py | import torch
from nebula.core.models.nebulamodel import NebulaModel
class SyscallModelMLP(NebulaModel):
def __init__(
self,
input_channels=3,
num_classes=10,
learning_rate=1e-3,
metrics=None,
confusion_matrix=None,
seed=None,
):
super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
self.example_input_array = torch.rand(1, 17)
self.learning_rate = learning_rate
self.criterion = torch.nn.CrossEntropyLoss()
self.l1 = torch.nn.Linear(17, 256)
self.batchnorm1 = torch.nn.BatchNorm1d(256)
self.dropout = torch.nn.Dropout(0.5)
self.l2 = torch.nn.Linear(256, 128)
self.batchnorm2 = torch.nn.BatchNorm1d(128)
self.l3 = torch.nn.Linear(128, num_classes)
def forward(self, x):
x = self.l1(x)
x = self.batchnorm1(x)
x = torch.relu(x)
x = self.dropout(x)
x = self.l2(x)
x = self.batchnorm2(x)
x = torch.relu(x)
x = self.dropout(x)
x = self.l3(x)
return x
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
return optimizer
| 1,340 | Python | .py | 37 | 28.081081 | 101 | 0.611583 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,969 | autoencoder.py | enriquetomasmb_nebula/nebula/core/models/syscall/autoencoder.py | import torch
from nebula.core.models.nebulamodel import NebulaModel
class SyscallModelAutoencoder(NebulaModel):
def __init__(
self,
input_channels=3,
num_classes=10,
learning_rate=1e-3,
metrics=None,
confusion_matrix=None,
seed=None,
):
super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
self.example_input_array = torch.rand(1, input_channels)
self.learning_rate = learning_rate
self.criterion = torch.nn.MSELoss()
self.fc1 = torch.nn.Linear(input_channels, 64)
self.fc2 = torch.nn.Linear(64, 16)
self.fc3 = torch.nn.Linear(16, 8)
self.fc4 = torch.nn.Linear(8, 16)
self.fc5 = torch.nn.Linear(16, 64)
self.fc6 = torch.nn.Linear(64, input_channels)
self.epoch_global_number = {"Train": 0, "Validation": 0, "Test": 0}
def encode(self, x):
z = torch.relu(self.fc1(x))
z = torch.relu(self.fc2(z))
z = torch.relu(self.fc3(z))
return z
def decode(self, x):
z = torch.relu(self.fc4(x))
z = torch.relu(self.fc5(z))
z = torch.relu(self.fc6(z))
return z
def forward(self, x):
z = self.encode(x)
z = self.decode(z)
return z
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
return optimizer
| 1,537 | Python | .py | 41 | 29.390244 | 101 | 0.606734 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,970 | svm.py | enriquetomasmb_nebula/nebula/core/models/syscall/svm.py | import torch
from nebula.core.models.nebulamodel import NebulaModel
import torchmetrics
class SyscallModelSGDOneClassSVM(NebulaModel):
def __init__(
self,
input_channels=3,
num_classes=10,
learning_rate=1e-3,
metrics=None,
confusion_matrix=None,
seed=None,
):
super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
self.example_input_array = torch.rand(1, input_channels)
self.learning_rate = learning_rate
self.nu = 0.1
self.w = torch.nn.Parameter(torch.zeros(input_channels), requires_grad=True)
self.rho = torch.nn.Parameter(torch.zeros(1), requires_grad=True)
self.epoch_global_number = {"Train": 0, "Validation": 0, "Test": 0}
def forward(self, x):
return torch.matmul(x, self.w) - self.rho
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.parameters(), lr=self.learning_rate)
return optimizer
def hinge_loss(self, y):
return torch.mean(torch.clamp(1 - y, min=0))
def step(self, batch, batch_idx, phase):
x, labels = batch
x = x.to(self.device)
labels = labels.to(self.device)
y_pred = self.forward(x)
if phase == "Train":
loss = 0.5 * torch.sum(self.w**2) + self.nu * self.hinge_loss(y_pred)
self.log(f"{phase}/Loss", loss, prog_bar=True)
else:
y_pred_classes = (y_pred > 0).type(torch.int64)
loss = torch.nn.functional.binary_cross_entropy_with_logits(y_pred, labels.float())
self.log(f"{phase}/Loss", loss, prog_bar=True)
self.log(
f"{phase}/Accuracy",
                torchmetrics.functional.accuracy(y_pred_classes, labels, task="multiclass", num_classes=self.num_classes),
prog_bar=True,
)
return loss
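# Illustrative sketch (hypothetical toy data, not from the original file): the hinge
# loss above only penalises scores that fall below the margin of 1.
if __name__ == "__main__":
    _scores = torch.tensor([2.0, 0.5, -1.0])
    print(torch.mean(torch.clamp(1 - _scores, min=0)))  # (0 + 0.5 + 2.0) / 3 = tensor(0.8333)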
| 1,966 | Python | .py | 46 | 33.608696 | 101 | 0.610471 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,971 | cnn.py | enriquetomasmb_nebula/nebula/core/models/emnist/cnn.py | import torch
from nebula.core.models.nebulamodel import NebulaModel
class EMNISTModelCNN(NebulaModel):
def __init__(
self,
input_channels=1,
num_classes=10,
learning_rate=1e-3,
metrics=None,
confusion_matrix=None,
seed=None,
):
super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
self.example_input_array = torch.rand(1, 1, 28, 28)
self.learning_rate = learning_rate
self.criterion = torch.nn.CrossEntropyLoss()
self.conv1 = torch.nn.Conv2d(
in_channels=input_channels,
out_channels=32,
kernel_size=(5, 5),
padding="same",
)
self.relu = torch.nn.ReLU()
self.pool1 = torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2)
self.conv2 = torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(5, 5), padding="same")
self.pool2 = torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2)
self.l1 = torch.nn.Linear(7 * 7 * 64, 2048)
self.l2 = torch.nn.Linear(2048, num_classes)
self.epoch_global_number = {"Train": 0, "Validation": 0, "Test": 0}
def forward(self, x):
input_layer = x.view(-1, 1, 28, 28)
conv1 = self.relu(self.conv1(input_layer))
pool1 = self.pool1(conv1)
conv2 = self.relu(self.conv2(pool1))
pool2 = self.pool2(conv2)
pool2_flat = pool2.reshape(-1, 7 * 7 * 64)
dense = self.relu(self.l1(pool2_flat))
logits = self.l2(dense)
return logits
def configure_optimizers(self):
optimizer = torch.optim.Adam(
self.parameters(),
lr=self.learning_rate,
betas=(self.config["beta1"], self.config["beta2"]),
amsgrad=self.config["amsgrad"],
)
return optimizer
| 1,946 | Python | .py | 48 | 31.520833 | 105 | 0.596083 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,972 | mlp.py | enriquetomasmb_nebula/nebula/core/models/emnist/mlp.py | import torch
from nebula.core.models.nebulamodel import NebulaModel
class EMNISTModelMLP(NebulaModel):
def __init__(
self,
input_channels=1,
num_classes=10,
learning_rate=1e-3,
metrics=None,
confusion_matrix=None,
seed=None,
):
super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
self.example_input_array = torch.zeros(1, 1, 28, 28)
self.learning_rate = learning_rate
self.criterion = torch.nn.CrossEntropyLoss()
self.l1 = torch.nn.Linear(28 * 28, 256)
self.l2 = torch.nn.Linear(256, 128)
self.l3 = torch.nn.Linear(128, num_classes)
def forward(self, x):
batch_size, channels, width, height = x.size()
x = x.view(batch_size, -1)
x = self.l1(x)
x = torch.relu(x)
x = self.l2(x)
x = torch.relu(x)
x = self.l3(x)
return x
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
return optimizer
| 1,174 | Python | .py | 32 | 28.71875 | 101 | 0.608811 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,973 | _blocks.py | enriquetomasmb_nebula/nebula/core/models/militarysar/_blocks.py | import torch.nn as nn
import collections
_activations = {"relu": nn.ReLU, "relu6": nn.ReLU6, "leaky_relu": nn.LeakyReLU}
class BaseBlock(nn.Module):
def __init__(self):
super(BaseBlock, self).__init__()
self._layer: nn.Sequential
def forward(self, x):
return self._layer(x)
class DenseBlock(BaseBlock):
def __init__(self, shape, **params):
super(DenseBlock, self).__init__()
in_dims, out_dims = shape
_seq = collections.OrderedDict(
[
("dense", nn.Linear(in_dims, out_dims)),
]
)
_act_name = params.get("activation")
if _act_name:
_seq.update({_act_name: _activations[_act_name](inplace=True)})
self._layer = nn.Sequential(_seq)
w_init = params.get("w_init", None)
idx = list(dict(self._layer.named_children()).keys()).index("dense")
if w_init:
w_init(self._layer[idx].weight)
b_init = params.get("b_init", None)
if b_init:
b_init(self._layer[idx].bias)
class Conv2DBlock(BaseBlock):
def __init__(self, shape, stride, padding="same", **params):
super(Conv2DBlock, self).__init__()
h, w, in_channels, out_channels = shape
_seq = collections.OrderedDict([("conv", nn.Conv2d(in_channels, out_channels, kernel_size=(h, w), stride=stride, padding=padding))])
_bn = params.get("batch_norm")
if _bn:
_seq.update({"bn": nn.BatchNorm2d(out_channels)})
_act_name = params.get("activation")
if _act_name:
_seq.update({_act_name: _activations[_act_name](inplace=True)})
_max_pool = params.get("max_pool")
if _max_pool:
_kernel_size = params.get("max_pool_size", 2)
_stride = params.get("max_pool_stride", _kernel_size)
_seq.update({"max_pool": nn.MaxPool2d(kernel_size=_kernel_size, stride=_stride)})
self._layer = nn.Sequential(_seq)
w_init = params.get("w_init", None)
idx = list(dict(self._layer.named_children()).keys()).index("conv")
if w_init:
w_init(self._layer[idx].weight)
b_init = params.get("b_init", None)
if b_init:
b_init(self._layer[idx].bias)
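# Hedged usage sketch (not part of the original module): how these configurable blocks
# are typically assembled. The shapes and the torch.nn.init initializers below are
# assumptions for illustration only.
#
#   import torch.nn as nn
#   conv = Conv2DBlock(shape=[5, 5, 1, 16], stride=1, padding="valid",
#                      activation="relu", batch_norm=True, max_pool=True,
#                      w_init=nn.init.kaiming_uniform_, b_init=nn.init.zeros_)
#   dense = DenseBlock(shape=(16, 10), activation="relu")
#   # Each block applies its layers in order: conv -> (bn) -> activation -> (max pool).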
| 2,283 | Python | .py | 53 | 33.849057 | 140 | 0.575566 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,974 | cnn.py | enriquetomasmb_nebula/nebula/core/models/militarysar/cnn.py | import torch
from nebula.core.models.nebulamodel import NebulaModel
from nebula.core.models.militarysar import _blocks
class MilitarySARModelCNN(NebulaModel):
def __init__(
self,
input_channels=2,
num_classes=10,
learning_rate=1e-3,
metrics=None,
confusion_matrix=None,
seed=None,
):
super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
self.example_input_array = torch.zeros((1, input_channels, 128, 128))
self.learning_rate = learning_rate
self.momentum = 0.9
self.weight_decay = 4e-3
self.dropout_rate = 0.5
self.criterion = torch.nn.CrossEntropyLoss()
self.model = torch.nn.Sequential(
_blocks.Conv2DBlock(shape=[5, 5, self.input_channels, 16], stride=1, padding="valid", activation="relu", max_pool=True),
_blocks.Conv2DBlock(shape=[5, 5, 16, 32], stride=1, padding="valid", activation="relu", max_pool=True),
_blocks.Conv2DBlock(shape=[6, 6, 32, 64], stride=1, padding="valid", activation="relu", max_pool=True),
_blocks.Conv2DBlock(shape=[5, 5, 64, 128], stride=1, padding="valid", activation="relu"),
torch.nn.Dropout(p=self.dropout_rate),
_blocks.Conv2DBlock(shape=[3, 3, 128, self.num_classes], stride=1, padding="valid"),
torch.nn.Flatten(),
torch.nn.Linear(360, num_classes),
)
def forward(self, x):
logits = self.model(x)
return logits
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.parameters(), lr=self.learning_rate, momentum=self.momentum, weight_decay=self.weight_decay)
# optimizer = torch.optim.Adam(
# self.parameters(),
# lr=self.learning_rate,
# weight_decay=self.weight_decay
# )
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optimizer, step_size=50, gamma=0.1)
# optimizer = torch.optim.Adam(
# self.parameters(),
# lr=self.learning_rate
# )
# lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
# optimizer=optimizer,
# milestones=self.lr_step,
# gamma=self.lr_decay
# )
return [optimizer], [lr_scheduler]
| 2,333 | Python | .py | 51 | 36.941176 | 133 | 0.619947 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,975 | mlp.py | enriquetomasmb_nebula/nebula/core/models/wadi/mlp.py | import torch
from nebula.core.models.nebulamodel import NebulaModel
class WADIModelMLP(NebulaModel):
def __init__(
self,
input_channels=1,
num_classes=10,
learning_rate=1e-3,
metrics=None,
confusion_matrix=None,
seed=None,
):
super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
self.example_input_array = torch.zeros(1, 123)
self.learning_rate = learning_rate
self.criterion = torch.nn.BCELoss()
self.l1 = torch.nn.Linear(123, 1024)
self.l2 = torch.nn.Linear(1024, 512)
self.l3 = torch.nn.Linear(512, 256)
self.l4 = torch.nn.Linear(256, 128)
self.l5 = torch.nn.Linear(128, 64)
self.l6 = torch.nn.Linear(64, 32)
self.l7 = torch.nn.Linear(32, 16)
self.l8 = torch.nn.Linear(16, 8)
self.l9 = torch.nn.Linear(8, num_classes)
self.epoch_global_number = {"Train": 0, "Validation": 0, "Test": 0}
def forward(self, x):
batch_size, features = x.size()
x = self.l1(x)
x = torch.relu(x)
x = self.l2(x)
x = torch.relu(x)
x = self.l3(x)
x = torch.relu(x)
x = self.l4(x)
x = torch.relu(x)
x = self.l5(x)
x = torch.relu(x)
x = self.l6(x)
x = torch.relu(x)
x = self.l7(x)
x = torch.relu(x)
x = self.l8(x)
x = torch.relu(x)
x = self.l9(x)
x = torch.sigmoid(x)
return x
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
return optimizer
| 1,760 | Python | .py | 51 | 26.117647 | 101 | 0.566471 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,976 | mlp.py | enriquetomasmb_nebula/nebula/core/models/kitsun/mlp.py | import torch
from nebula.core.models.nebulamodel import NebulaModel
class KitsunModelMLP(NebulaModel):
def __init__(
self,
input_channels=356,
num_classes=10,
learning_rate=1e-3,
metrics=None,
confusion_matrix=None,
seed=None,
):
super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
self.example_input_array = torch.zeros(1, 356)
self.learning_rate = learning_rate
self.criterion = torch.nn.CrossEntropyLoss()
self.l1 = torch.nn.Linear(input_channels, 1024)
self.batchnorm1 = torch.nn.BatchNorm1d(1024)
self.dropout = torch.nn.Dropout(0.5)
self.l2 = torch.nn.Linear(1024, 512)
self.batchnorm2 = torch.nn.BatchNorm1d(512)
self.l3 = torch.nn.Linear(512, 128)
self.batchnorm3 = torch.nn.BatchNorm1d(128)
self.l5 = torch.nn.Linear(128, num_classes)
def forward(self, x):
x = self.l1(x)
x = self.batchnorm1(x)
x = torch.relu(x)
x = self.dropout(x)
x = self.l2(x)
x = self.batchnorm2(x)
x = torch.relu(x)
x = self.dropout(x)
x = self.l3(x)
x = self.batchnorm3(x)
x = torch.relu(x)
x = self.dropout(x)
x = self.l5(x)
return x
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
return optimizer
| 1,561 | Python | .py | 43 | 28.069767 | 101 | 0.606883 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,977 | simplemobilenet.py | enriquetomasmb_nebula/nebula/core/models/cifar10/simplemobilenet.py | import matplotlib
import matplotlib.pyplot as plt
matplotlib.use("Agg")
plt.switch_backend("Agg")
from torch import nn
import torch
from nebula.core.models.nebulamodel import NebulaModel
class SimpleMobileNetV1(NebulaModel):
def __init__(
self,
input_channels=3,
num_classes=10,
learning_rate=1e-3,
metrics=None,
confusion_matrix=None,
seed=None,
):
super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
self.example_input_array = torch.rand(1, 3, 32, 32)
self.learning_rate = learning_rate
        self.criterion = torch.nn.CrossEntropyLoss()
def conv_dw(input_channels, num_classes, stride):
return nn.Sequential(
nn.Conv2d(
input_channels,
input_channels,
3,
stride,
1,
groups=input_channels,
bias=False,
),
nn.BatchNorm2d(input_channels),
nn.ReLU(inplace=True),
nn.Conv2d(input_channels, num_classes, 1, 1, 0, bias=False),
nn.BatchNorm2d(num_classes),
nn.ReLU(inplace=True),
)
self.model = nn.Sequential(
nn.Conv2d(3, 32, 3, 1, 1, bias=False),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
conv_dw(32, 64, 1),
conv_dw(64, 128, 2),
conv_dw(128, 128, 1),
conv_dw(128, 256, 2),
conv_dw(256, 256, 1),
nn.AdaptiveAvgPool2d(1),
)
self.fc = nn.Linear(256, num_classes)
def forward(self, x):
x = self.model(x)
x = x.view(-1, 256)
x = self.fc(x)
return x
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
return optimizer
| 2,062 | Python | .py | 59 | 24 | 101 | 0.542671 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,978 | fastermobilenet.py | enriquetomasmb_nebula/nebula/core/models/cifar10/fastermobilenet.py | from torch import nn
from nebula.core.models.nebulamodel import NebulaModel
import torch
class FasterMobileNet(NebulaModel):
def __init__(
self,
input_channels=3,
num_classes=10,
learning_rate=1e-3,
metrics=None,
confusion_matrix=None,
seed=None,
):
super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
self.example_input_array = torch.rand(1, 3, 32, 32)
self.learning_rate = learning_rate
        self.criterion = torch.nn.CrossEntropyLoss()
def conv_dw(input_channels, num_classes, stride):
return nn.Sequential(
nn.Conv2d(
input_channels,
input_channels,
3,
stride,
1,
groups=input_channels,
bias=False,
),
nn.BatchNorm2d(input_channels),
nn.ReLU(inplace=True),
nn.Conv2d(input_channels, num_classes, 1, 1, 0, bias=False),
nn.BatchNorm2d(num_classes),
nn.ReLU(inplace=True),
)
self.model = nn.Sequential(
nn.Conv2d(3, 16, 3, 1, 1, bias=False),
nn.BatchNorm2d(16),
nn.ReLU(inplace=True),
conv_dw(16, 32, 1),
conv_dw(32, 64, 2),
conv_dw(64, 64, 1),
nn.AdaptiveAvgPool2d(1),
)
self.fc = nn.Linear(64, num_classes)
def forward(self, x):
x = self.model(x)
x = x.view(-1, 64)
x = self.fc(x)
return x
def configure_optimizers(self):
optimizer = torch.optim.Adam(
self.parameters(),
lr=self.learning_rate,
betas=(self.config["beta1"], self.config["beta2"]),
amsgrad=self.config["amsgrad"],
)
return optimizer
| 2,029 | Python | .py | 58 | 23.362069 | 101 | 0.524198 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,979 | cnn.py | enriquetomasmb_nebula/nebula/core/models/cifar10/cnn.py | import torch
from nebula.core.models.nebulamodel import NebulaModel
class CIFAR10ModelCNN(NebulaModel):
def __init__(
self,
input_channels=3,
num_classes=10,
learning_rate=1e-3,
metrics=None,
confusion_matrix=None,
seed=None,
):
super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
self.example_input_array = torch.rand(1, 3, 32, 32)
self.learning_rate = learning_rate
self.criterion = torch.nn.CrossEntropyLoss()
self.conv1 = torch.nn.Conv2d(input_channels, 16, 3, padding=1)
self.conv2 = torch.nn.Conv2d(16, 32, 3, padding=1)
self.conv3 = torch.nn.Conv2d(32, 64, 3, padding=1)
self.pool = torch.nn.MaxPool2d(2, 2)
self.fc1 = torch.nn.Linear(64 * 4 * 4, 512)
self.fc2 = torch.nn.Linear(512, num_classes)
def forward(self, x):
x = self.pool(torch.relu(self.conv1(x)))
x = self.pool(torch.relu(self.conv2(x)))
x = self.pool(torch.relu(self.conv3(x)))
x = x.view(-1, 64 * 4 * 4)
x = torch.relu(self.fc1(x))
x = self.fc2(x)
return x
def configure_optimizers(self):
optimizer = torch.optim.Adam(
self.parameters(),
lr=self.learning_rate,
betas=(self.config["beta1"], self.config["beta2"]),
amsgrad=self.config["amsgrad"],
)
return optimizer
| 1,542 | Python | .py | 39 | 31 | 101 | 0.59853 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,980 | dualagg.py | enriquetomasmb_nebula/nebula/core/models/cifar10/dualagg.py | import torch
import torch.nn.functional as F
import lightning as pl
from torchmetrics.classification import MulticlassAccuracy, MulticlassRecall, MulticlassPrecision, MulticlassF1Score, MulticlassConfusionMatrix
from torchmetrics import MetricCollection
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
matplotlib.use("Agg")
plt.switch_backend("Agg")
import logging
from nebula.config.config import TRAINING_LOGGER
logging_training = logging.getLogger(TRAINING_LOGGER)
class ContrastiveLoss(torch.nn.Module):
"""
Contrastive loss function.
"""
def __init__(self, mu=0.5):
super().__init__()
self.mu = mu
self.cross_entropy_loss = torch.nn.CrossEntropyLoss()
def forward(self, local_out, global_out, historical_out, labels):
"""
Calculates the contrastive loss between the local output, global output, and historical output.
Args:
local_out (torch.Tensor): The local output tensor of shape (batch_size, embedding_size).
global_out (torch.Tensor): The global output tensor of shape (batch_size, embedding_size).
historical_out (torch.Tensor): The historical output tensor of shape (batch_size, embedding_size).
labels (torch.Tensor): The ground truth labels tensor of shape (batch_size,).
Returns:
torch.Tensor: The contrastive loss value.
Raises:
ValueError: If the input tensors have different shapes.
Notes:
- The contrastive loss is calculated as the difference between the mean cosine similarity of the local output
with the historical output and the mean cosine similarity of the local output with the global output,
multiplied by a scaling factor mu.
- The cosine similarity values represent the similarity between the corresponding vectors in the input tensors.
Higher values indicate greater similarity, while lower values indicate less similarity.
"""
if local_out.shape != global_out.shape or local_out.shape != historical_out.shape:
raise ValueError("Input tensors must have the same shape.")
# Cross-entropy loss
ce_loss = self.cross_entropy_loss(local_out, labels)
# if round > 1:
# Positive cosine similarity
pos_cos_sim = F.cosine_similarity(local_out, historical_out, dim=1).mean()
# Negative cosine similarity
neg_cos_sim = -F.cosine_similarity(local_out, global_out, dim=1).mean()
# Combined loss
contrastive_loss = ce_loss + self.mu * 0.5 * (pos_cos_sim + neg_cos_sim)
logging_training.debug(f"Contrastive loss (mu={self.mu}) with 0.5 of factor: ce_loss: {ce_loss}, pos_cos_sim_local_historical: {pos_cos_sim}, neg_cos_sim_local_global: {neg_cos_sim}, loss: {contrastive_loss}")
return contrastive_loss
# else:
# logging_training.debug(f"Cross-entropy loss (local model): {ce_loss}")
# return ce_loss
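# Illustrative note (not part of the original file): with the definitions above the loss is
#   L = CE(local_out, labels) + mu * 0.5 * (cos(local_out, historical_out) - cos(local_out, global_out))
# where cos(., .) is the mean cosine similarity over the batch. A minimal sanity check with
# random logits (shapes assumed for illustration) could look like:
#   criterion = ContrastiveLoss(mu=0.5)
#   local, glob, hist = torch.randn(8, 10), torch.randn(8, 10), torch.randn(8, 10)
#   labels = torch.randint(0, 10, (8,))
#   loss = criterion(local, glob, hist, labels)  # argument order: local, global, historical, labels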
class DualAggModel(pl.LightningModule):
def process_metrics(self, phase, y_pred, y, loss=None, mode="local"):
"""
Calculate and log metrics for the given phase.
Args:
phase (str): One of 'Train', 'Validation', or 'Test'
y_pred (torch.Tensor): Model predictions
y (torch.Tensor): Ground truth labels
            loss (torch.Tensor, optional): Loss value
            mode (str): One of 'local', 'historical', or 'global'
        """
y_pred_classes = torch.argmax(y_pred, dim=1)
if phase == "Train":
if mode == "local":
output = self.local_train_metrics(y_pred_classes, y)
elif mode == "historical":
output = self.historical_train_metrics(y_pred_classes, y)
elif mode == "global":
output = self.global_train_metrics(y_pred_classes, y)
elif phase == "Validation":
if mode == "local":
output = self.local_val_metrics(y_pred_classes, y)
elif mode == "historical":
output = self.historical_val_metrics(y_pred_classes, y)
elif mode == "global":
output = self.global_val_metrics(y_pred_classes, y)
elif phase == "Test":
if mode == "local":
output = self.local_test_metrics(y_pred_classes, y)
elif mode == "historical":
output = self.historical_test_metrics(y_pred_classes, y)
elif mode == "global":
output = self.global_test_metrics(y_pred_classes, y)
else:
raise NotImplementedError
# print(f"y_pred shape: {y_pred.shape}, y_pred_classes shape: {y_pred_classes.shape}, y shape: {y.shape}") # Debug print
output = {f"{mode}/{phase}/{key.replace('Multiclass', '').split('/')[-1]}": value for key, value in output.items()}
self.log_dict(output, prog_bar=True, logger=True)
if self.local_cm is not None and self.historical_cm is not None and self.global_cm is not None:
if mode == "local":
self.local_cm.update(y_pred_classes, y)
elif mode == "historical":
self.historical_cm.update(y_pred_classes, y)
elif mode == "global":
self.global_cm.update(y_pred_classes, y)
def log_metrics_by_epoch(self, phase, print_cm=False, plot_cm=False, mode="local"):
"""
Log all metrics at the end of an epoch for the given phase.
Args:
            phase (str): One of 'Train', 'Validation', or 'Test'
            print_cm (bool): Whether to print the confusion matrix
            plot_cm (bool): Whether to plot the confusion matrix
            mode (str): One of 'local', 'historical', or 'global'
        """
if mode == "local":
print(f"Epoch end: {mode} {phase}, epoch number: {self.local_epoch_global_number[phase]}")
elif mode == "historical":
print(f"Epoch end: {mode} {phase}, epoch number: {self.historical_epoch_global_number[phase]}")
elif mode == "global":
print(f"Epoch end: {mode} {phase}, epoch number: {self.global_epoch_global_number[phase]}")
if phase == "Train":
if mode == "local":
output = self.local_train_metrics.compute()
self.local_train_metrics.reset()
elif mode == "historical":
output = self.historical_train_metrics.compute()
self.historical_train_metrics.reset()
elif mode == "global":
output = self.global_train_metrics.compute()
self.global_train_metrics.reset()
elif phase == "Validation":
if mode == "local":
output = self.local_val_metrics.compute()
self.local_val_metrics.reset()
elif mode == "historical":
output = self.historical_val_metrics.compute()
self.historical_val_metrics.reset()
elif mode == "global":
output = self.global_val_metrics.compute()
self.global_val_metrics.reset()
elif phase == "Test":
if mode == "local":
output = self.local_test_metrics.compute()
self.local_test_metrics.reset()
elif mode == "historical":
output = self.historical_test_metrics.compute()
self.historical_test_metrics.reset()
elif mode == "global":
output = self.global_test_metrics.compute()
self.global_test_metrics.reset()
else:
raise NotImplementedError
output = {f"{mode}/{phase}Epoch/{key.replace('Multiclass', '').split('/')[-1]}": value for key, value in output.items()}
self.log_dict(output, prog_bar=True, logger=True)
if self.local_cm is not None and self.historical_cm is not None and self.global_cm is not None:
if mode == "local":
cm = self.local_cm.compute().cpu()
elif mode == "historical":
cm = self.historical_cm.compute().cpu()
elif mode == "global":
cm = self.global_cm.compute().cpu()
print(f"{mode}/{phase}Epoch/CM\n", cm) if print_cm else None
if plot_cm:
plt.figure(figsize=(10, 7))
ax = sns.heatmap(cm.numpy(), annot=True, fmt="d", cmap="Blues")
ax.set_xlabel("Predicted labels")
ax.set_ylabel("True labels")
ax.set_title("Confusion Matrix")
ax.set_xticks(range(self.num_classes))
ax.set_yticks(range(self.num_classes))
ax.xaxis.set_ticklabels([i for i in range(self.num_classes)])
ax.yaxis.set_ticklabels([i for i in range(self.num_classes)])
if mode == "local":
self.logger.experiment.add_figure(f"{mode}/{phase}Epoch/CM", ax.get_figure(), global_step=self.local_epoch_global_number[phase])
elif mode == "historical":
self.logger.experiment.add_figure(f"{mode}/{phase}Epoch/CM", ax.get_figure(), global_step=self.historical_epoch_global_number[phase])
elif mode == "global":
self.logger.experiment.add_figure(f"{mode}/{phase}Epoch/CM", ax.get_figure(), global_step=self.global_epoch_global_number[phase])
plt.close()
if mode == "local":
self.local_epoch_global_number[phase] += 1
elif mode == "historical":
self.historical_epoch_global_number[phase] += 1
elif mode == "global":
self.global_epoch_global_number[phase] += 1
def __init__(self, input_channels=3, num_classes=10, learning_rate=1e-3, mu=0.5, metrics=None, confusion_matrix=None, seed=None):
super().__init__()
self.input_channels = input_channels
self.num_classes = num_classes
self.learning_rate = learning_rate
self.mu = mu
if metrics is None:
metrics = MetricCollection([MulticlassAccuracy(num_classes=num_classes), MulticlassPrecision(num_classes=num_classes), MulticlassRecall(num_classes=num_classes), MulticlassF1Score(num_classes=num_classes)])
# Define metrics
self.local_train_metrics = metrics.clone(prefix="Local/Train/")
self.local_val_metrics = metrics.clone(prefix="Local/Validation/")
self.local_test_metrics = metrics.clone(prefix="Local/Test/")
self.historical_train_metrics = metrics.clone(prefix="Historical/Train/")
self.historical_val_metrics = metrics.clone(prefix="Historical/Validation/")
self.historical_test_metrics = metrics.clone(prefix="Historical/Test/")
self.global_train_metrics = metrics.clone(prefix="Global/Train/")
self.global_val_metrics = metrics.clone(prefix="Global/Validation/")
self.global_test_metrics = metrics.clone(prefix="Global/Test/")
if confusion_matrix is None:
self.local_cm = MulticlassConfusionMatrix(num_classes=num_classes)
self.historical_cm = MulticlassConfusionMatrix(num_classes=num_classes)
self.global_cm = MulticlassConfusionMatrix(num_classes=num_classes)
# Set seed for reproducibility initialization
if seed is not None:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.local_epoch_global_number = {"Train": 0, "Validation": 0, "Test": 0}
self.historical_epoch_global_number = {"Train": 0, "Validation": 0, "Test": 0}
self.global_epoch_global_number = {"Train": 0, "Validation": 0, "Test": 0}
self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
self.example_input_array = torch.rand(1, 3, 32, 32)
self.learning_rate = learning_rate
self.criterion = ContrastiveLoss(mu=self.mu)
# Define layers of the model
self.model = torch.nn.Sequential(
torch.nn.Conv2d(input_channels, 16, 3, padding=1),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2, 2),
torch.nn.Conv2d(16, 32, 3, padding=1),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2, 2),
torch.nn.Conv2d(32, 64, 3, padding=1),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2, 2),
torch.nn.Flatten(),
torch.nn.Linear(64 * 4 * 4, 512),
torch.nn.ReLU(),
torch.nn.Linear(512, num_classes),
)
# Siamese models of the model above
self.historical_model = torch.nn.Sequential(
torch.nn.Conv2d(input_channels, 16, 3, padding=1),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2, 2),
torch.nn.Conv2d(16, 32, 3, padding=1),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2, 2),
torch.nn.Conv2d(32, 64, 3, padding=1),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2, 2),
torch.nn.Flatten(),
torch.nn.Linear(64 * 4 * 4, 512),
torch.nn.ReLU(),
torch.nn.Linear(512, num_classes),
)
self.global_model = torch.nn.Sequential(
torch.nn.Conv2d(input_channels, 16, 3, padding=1),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2, 2),
torch.nn.Conv2d(16, 32, 3, padding=1),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2, 2),
torch.nn.Conv2d(32, 64, 3, padding=1),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2, 2),
torch.nn.Flatten(),
torch.nn.Linear(64 * 4 * 4, 512),
torch.nn.ReLU(),
torch.nn.Linear(512, num_classes),
)
# self.historical_model = copy.deepcopy(self.model)
# self.global_model = copy.deepcopy(self.model)
def forward(self, x, mode="local"):
"""Forward pass of the model."""
if mode == "local":
return self.model(x)
elif mode == "global":
return self.global_model(x)
elif mode == "historical":
return self.historical_model(x)
else:
raise NotImplementedError
def configure_optimizers(self):
""" """
optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate, betas=(self.config["beta1"], self.config["beta2"]), amsgrad=self.config["amsgrad"])
return optimizer
def step(self, batch, batch_idx, phase):
images, labels = batch
images = images.to(self.device)
labels = labels.to(self.device)
local_out = self.forward(images, mode="local")
with torch.no_grad():
historical_out = self.forward(images, mode="historical")
global_out = self.forward(images, mode="global")
loss = self.criterion(local_out, global_out, historical_out, labels)
# Get metrics for each batch and log them
self.log(f"{phase}/ConstrastiveLoss", loss, prog_bar=True, logger=True) # Constrastive loss
self.process_metrics(phase, local_out, labels, loss, mode="local")
self.process_metrics(phase, historical_out, labels, loss, mode="historical")
self.process_metrics(phase, global_out, labels, loss, mode="global")
return loss
def training_step(self, batch, batch_id):
"""
Training step for the model.
Args:
batch:
batch_id:
Returns:
"""
return self.step(batch, batch_id, "Train")
def on_train_epoch_end(self):
self.log_metrics_by_epoch("Train", print_cm=True, plot_cm=True, mode="local")
self.log_metrics_by_epoch("Train", print_cm=True, plot_cm=True, mode="historical")
self.log_metrics_by_epoch("Train", print_cm=True, plot_cm=True, mode="global")
def validation_step(self, batch, batch_idx):
"""
Validation step for the model.
Args:
batch:
batch_idx:
Returns:
"""
return self.step(batch, batch_idx, "Validation")
def on_validation_epoch_end(self):
self.log_metrics_by_epoch("Validation", print_cm=True, plot_cm=False, mode="local")
self.log_metrics_by_epoch("Validation", print_cm=True, plot_cm=False, mode="historical")
self.log_metrics_by_epoch("Validation", print_cm=True, plot_cm=False, mode="global")
def test_step(self, batch, batch_idx):
"""
Test step for the model.
Args:
batch:
batch_idx:
Returns:
"""
return self.step(batch, batch_idx, "Test")
def on_test_epoch_end(self):
self.log_metrics_by_epoch("Test", print_cm=True, plot_cm=True, mode="local")
self.log_metrics_by_epoch("Test", print_cm=True, plot_cm=True, mode="historical")
self.log_metrics_by_epoch("Test", print_cm=True, plot_cm=True, mode="global")
def save_historical_model(self):
"""
Save the current local model as the historical model.
"""
logging_training.info("Copying local model to historical model.")
self.historical_model.load_state_dict(self.model.state_dict())
def global_load_state_dict(self, state_dict):
"""
Load the given state dictionary into the global model.
Args:
state_dict (dict): The state dictionary to load into the global model.
"""
logging_training.info("Loading state dict into global model.")
adapted_state_dict = self.adapt_state_dict_for_model(state_dict, "model")
self.global_model.load_state_dict(adapted_state_dict)
def historical_load_state_dict(self, state_dict):
"""
Load the given state dictionary into the historical model.
Args:
state_dict (dict): The state dictionary to load into the historical model.
"""
logging_training.info("Loading state dict into historical model.")
adapted_state_dict = self.adapt_state_dict_for_model(state_dict, "model")
self.historical_model.load_state_dict(adapted_state_dict)
def adapt_state_dict_for_model(self, state_dict, model_prefix):
"""
Adapt the keys in the provided state_dict to match the structure expected by the model.
"""
new_state_dict = {}
prefix = f"{model_prefix}."
for key, value in state_dict.items():
if key.startswith(prefix):
# Remove the specific prefix from each key
new_key = key[len(prefix) :]
new_state_dict[new_key] = value
return new_state_dict
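    # Illustrative example (assumption about the incoming keys): with model_prefix="model",
    # an entry such as "model.0.weight" in the received state_dict becomes "0.weight",
    # matching the parameter names of the plain nn.Sequential in self.global_model /
    # self.historical_model; keys without the prefix are silently dropped.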
def get_global_model_parameters(self):
"""
Get the parameters of the global model.
"""
return self.global_model.state_dict()
def print_summary(self):
"""
Print a summary of local, historical and global models to check if they are the same.
"""
logging_training.info("Local model summary:")
logging_training.info(self.model)
logging_training.info("Historical model summary:")
logging_training.info(self.historical_model)
logging_training.info("Global model summary:")
logging_training.info(self.global_model)
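# Hedged usage sketch (not part of the original file): how the three models are typically
# refreshed around a federated round; the aggregated_state variable is an assumption.
#
#   model = DualAggModel(mu=0.5)
#   model.save_historical_model()                    # snapshot the local weights
#   model.global_load_state_dict(aggregated_state)   # install the aggregated weights
#   # training then optimizes the ContrastiveLoss defined above, which mixes cross-entropy
#   # with the cosine-similarity terms between local, historical and global outputs.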
| 19,090 | Python | .py | 380 | 39.307895 | 218 | 0.607141 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,981 | cnnV3.py | enriquetomasmb_nebula/nebula/core/models/cifar10/cnnV3.py | import torch
from nebula.core.models.nebulamodel import NebulaModel
class CIFAR10ModelCNN_V3(NebulaModel):
def __init__(
self,
input_channels=3,
num_classes=10,
learning_rate=1e-3,
metrics=None,
confusion_matrix=None,
seed=None,
):
super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
self.example_input_array = torch.rand(1, 3, 32, 32)
self.learning_rate = learning_rate
        self.criterion = torch.nn.CrossEntropyLoss()
self.layer1 = torch.nn.Sequential(
torch.nn.Conv2d(input_channels, 32, kernel_size=3, padding=1),
torch.nn.BatchNorm2d(32),
torch.nn.ReLU(),
torch.nn.Conv2d(32, 32, kernel_size=3, padding=1),
torch.nn.BatchNorm2d(32),
torch.nn.ReLU(),
torch.nn.MaxPool2d(kernel_size=2, stride=2),
torch.nn.Dropout(0.25),
)
self.layer2 = torch.nn.Sequential(
torch.nn.Conv2d(32, 64, kernel_size=3, padding=1),
torch.nn.BatchNorm2d(64),
torch.nn.ReLU(),
torch.nn.Conv2d(64, 64, kernel_size=3, padding=1),
torch.nn.BatchNorm2d(64),
torch.nn.ReLU(),
torch.nn.MaxPool2d(kernel_size=2, stride=2),
torch.nn.Dropout(0.25),
)
self.layer3 = torch.nn.Sequential(
torch.nn.Conv2d(64, 128, kernel_size=3, padding=1),
torch.nn.BatchNorm2d(128),
torch.nn.ReLU(),
torch.nn.Conv2d(128, 128, kernel_size=3, padding=1),
torch.nn.BatchNorm2d(128),
torch.nn.ReLU(),
torch.nn.MaxPool2d(kernel_size=2, stride=2),
torch.nn.Dropout(0.25),
)
self.fc_layer = torch.nn.Sequential(
torch.nn.Linear(128 * 4 * 4, 512),
torch.nn.ReLU(),
torch.nn.Dropout(0.5),
torch.nn.Linear(512, num_classes),
)
def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = x.view(x.size(0), -1) # Flatten the layer
x = self.fc_layer(x)
return x
def configure_optimizers(self):
optimizer = torch.optim.Adam(
self.parameters(),
lr=self.learning_rate,
betas=(self.config["beta1"], self.config["beta2"]),
amsgrad=self.config["amsgrad"],
)
return optimizer
| 2,586 | Python | .py | 68 | 27.602941 | 101 | 0.561977 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,982 | resnet.py | enriquetomasmb_nebula/nebula/core/models/cifar10/resnet.py | from torch import nn
from torchmetrics import MetricCollection
import matplotlib
import matplotlib.pyplot as plt
from nebula.core.models.nebulamodel import NebulaModel
matplotlib.use("Agg")
plt.switch_backend("Agg")
import torch
from torchmetrics.classification import (
MulticlassAccuracy,
MulticlassRecall,
MulticlassPrecision,
MulticlassF1Score,
MulticlassConfusionMatrix,
)
from torchvision.models import resnet18, resnet34, resnet50
IMAGE_SIZE = 32
BATCH_SIZE = 256 if torch.cuda.is_available() else 64
classifiers = {
"resnet18": resnet18(),
"resnet34": resnet34(),
"resnet50": resnet50(),
}
class CIFAR10ModelResNet(NebulaModel):
def __init__(
self,
input_channels=3,
num_classes=10,
learning_rate=1e-3,
metrics=None,
confusion_matrix=None,
seed=None,
implementation="scratch",
classifier="resnet9",
):
super().__init__()
if metrics is None:
metrics = MetricCollection(
[
MulticlassAccuracy(num_classes=num_classes),
MulticlassPrecision(num_classes=num_classes),
MulticlassRecall(num_classes=num_classes),
MulticlassF1Score(num_classes=num_classes),
]
)
self.train_metrics = metrics.clone(prefix="Train/")
self.val_metrics = metrics.clone(prefix="Validation/")
self.test_metrics = metrics.clone(prefix="Test/")
if confusion_matrix is None:
self.cm = MulticlassConfusionMatrix(num_classes=num_classes)
if seed is not None:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.implementation = implementation
self.classifier = classifier
self.example_input_array = torch.rand(1, 3, 32, 32)
self.learning_rate = learning_rate
self.criterion = torch.nn.CrossEntropyLoss()
self.model = self._build_model(input_channels, num_classes)
self.epoch_global_number = {"Train": 0, "Validation": 0, "Test": 0}
def _build_model(self, input_channels, num_classes):
if self.implementation == "scratch":
if self.classifier == "resnet9":
"""
ResNet9 implementation
"""
def conv_block(input_channels, num_classes, pool=False):
layers = [
nn.Conv2d(input_channels, num_classes, kernel_size=3, padding=1),
nn.BatchNorm2d(num_classes),
nn.ReLU(inplace=True),
]
if pool:
layers.append(nn.MaxPool2d(2))
return nn.Sequential(*layers)
conv1 = conv_block(input_channels, 64)
conv2 = conv_block(64, 128, pool=True)
res1 = nn.Sequential(conv_block(128, 128), conv_block(128, 128))
conv3 = conv_block(128, 256, pool=True)
conv4 = conv_block(256, 512, pool=True)
res2 = nn.Sequential(conv_block(512, 512), conv_block(512, 512))
classifier = nn.Sequential(nn.MaxPool2d(4), nn.Flatten(), nn.Linear(512, num_classes))
return nn.ModuleDict(
{
"conv1": conv1,
"conv2": conv2,
"res1": res1,
"conv3": conv3,
"conv4": conv4,
"res2": res2,
"classifier": classifier,
}
)
            if self.classifier in classifiers:
                model = classifiers[self.classifier]
                model.fc = torch.nn.Linear(model.fc.in_features, num_classes)
return model
raise NotImplementedError()
if self.implementation == "timm":
raise NotImplementedError()
raise NotImplementedError()
def forward(self, x):
if not isinstance(x, torch.Tensor):
raise TypeError(f"images must be a torch.Tensor, got {type(x)}")
if self.implementation == "scratch":
if self.classifier == "resnet9":
out = self.model["conv1"](x)
out = self.model["conv2"](out)
out = self.model["res1"](out) + out
out = self.model["conv3"](out)
out = self.model["conv4"](out)
out = self.model["res2"](out) + out
out = self.model["classifier"](out)
return out
return self.model(x)
if self.implementation == "timm":
raise NotImplementedError()
raise NotImplementedError()
def configure_optimizers(self):
if self.implementation == "scratch" and self.classifier == "resnet9":
params = []
for key, module in self.model.items():
params += list(module.parameters())
optimizer = torch.optim.Adam(params, lr=self.learning_rate, weight_decay=1e-4)
else:
optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate, weight_decay=1e-4)
return optimizer
| 5,276 | Python | .py | 127 | 29.165354 | 102 | 0.563477 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,983 | cnnV2.py | enriquetomasmb_nebula/nebula/core/models/cifar10/cnnV2.py | import torch
from nebula.core.models.nebulamodel import NebulaModel
class CIFAR10ModelCNN_V2(NebulaModel):
def __init__(
self,
input_channels=3,
num_classes=10,
learning_rate=1e-3,
metrics=None,
confusion_matrix=None,
seed=None,
):
super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
self.example_input_array = torch.rand(1, 3, 32, 32)
self.learning_rate = learning_rate
self.criterion = torch.nn.CrossEntropyLoss()
self.conv1 = torch.nn.Conv2d(input_channels, 32, 5, padding=2)
self.bn1 = torch.nn.BatchNorm2d(32)
self.conv2 = torch.nn.Conv2d(32, 64, 3, padding=1)
self.bn2 = torch.nn.BatchNorm2d(64)
self.conv3 = torch.nn.Conv2d(64, 128, 3, padding=1)
self.bn3 = torch.nn.BatchNorm2d(128)
self.pool = torch.nn.MaxPool2d(2, 2)
self.fc1 = torch.nn.Linear(128 * 4 * 4, 512)
self.fc2 = torch.nn.Linear(512, num_classes)
self.dropout = torch.nn.Dropout(0.5)
def forward(self, x):
x = self.pool(torch.relu(self.bn1(self.conv1(x))))
x = self.pool(torch.relu(self.bn2(self.conv2(x))))
x = self.pool(torch.relu(self.bn3(self.conv3(x))))
x = x.view(-1, 128 * 4 * 4)
x = torch.relu(self.fc1(x))
x = self.dropout(x)
x = self.fc2(x)
return x
def configure_optimizers(self):
optimizer = torch.optim.Adam(
self.parameters(),
lr=self.learning_rate,
betas=(self.config["beta1"], self.config["beta2"]),
amsgrad=self.config["amsgrad"],
)
return optimizer
| 1,784 | Python | .py | 44 | 31.954545 | 101 | 0.602076 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,984 | cnn.py | enriquetomasmb_nebula/nebula/core/models/fashionmnist/cnn.py | import torch
from nebula.core.models.nebulamodel import NebulaModel
class FashionMNISTModelCNN(NebulaModel):
def __init__(
self,
input_channels=1,
num_classes=10,
learning_rate=1e-3,
metrics=None,
confusion_matrix=None,
seed=None,
):
super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
self.example_input_array = torch.rand(1, 1, 28, 28)
self.learning_rate = learning_rate
self.criterion = torch.nn.CrossEntropyLoss()
self.conv1 = torch.nn.Conv2d(
in_channels=input_channels,
out_channels=32,
kernel_size=(5, 5),
padding="same",
)
self.relu = torch.nn.ReLU()
self.pool1 = torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2)
self.conv2 = torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(5, 5), padding="same")
self.pool2 = torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2)
self.l1 = torch.nn.Linear(7 * 7 * 64, 2048)
self.l2 = torch.nn.Linear(2048, num_classes)
self.epoch_global_number = {"Train": 0, "Validation": 0, "Test": 0}
def forward(self, x):
input_layer = x.view(-1, 1, 28, 28)
conv1 = self.relu(self.conv1(input_layer))
pool1 = self.pool1(conv1)
conv2 = self.relu(self.conv2(pool1))
pool2 = self.pool2(conv2)
pool2_flat = pool2.reshape(-1, 7 * 7 * 64)
dense = self.relu(self.l1(pool2_flat))
logits = self.l2(dense)
return logits
def configure_optimizers(self):
optimizer = torch.optim.Adam(
self.parameters(),
lr=self.learning_rate,
betas=(self.config["beta1"], self.config["beta2"]),
amsgrad=self.config["amsgrad"],
)
return optimizer
| 1,952 | Python | .py | 48 | 31.645833 | 105 | 0.597361 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,985 | mlp.py | enriquetomasmb_nebula/nebula/core/models/fashionmnist/mlp.py | import torch
from nebula.core.models.nebulamodel import NebulaModel
class FashionMNISTModelMLP(NebulaModel):
def __init__(
self,
input_channels=1,
num_classes=10,
learning_rate=1e-3,
metrics=None,
confusion_matrix=None,
seed=None,
):
super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
self.config = {"beta1": 0.851436, "beta2": 0.999689, "amsgrad": True}
self.example_input_array = torch.rand(1, 1, 28, 28)
self.learning_rate = learning_rate
self.criterion = torch.nn.CrossEntropyLoss()
self.l1 = torch.nn.Linear(28 * 28, 256)
self.l2 = torch.nn.Linear(256, 128)
self.l3 = torch.nn.Linear(128, num_classes)
def forward(self, x):
batch_size, channels, width, height = x.size()
x = x.view(batch_size, -1)
x = self.l1(x)
x = torch.relu(x)
x = self.l2(x)
x = torch.relu(x)
x = self.l3(x)
return x
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
return optimizer
| 1,179 | Python | .py | 32 | 28.875 | 101 | 0.610526 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,986 | cnn.py | enriquetomasmb_nebula/nebula/core/models/cifar100/cnn.py | import torch
from nebula.core.models.nebulamodel import NebulaModel
class CIFAR100ModelCNN(NebulaModel):
def __init__(
self,
input_channels=3,
num_classes=100,
learning_rate=1e-3,
metrics=None,
confusion_matrix=None,
seed=None,
):
super().__init__(input_channels, num_classes, learning_rate, metrics, confusion_matrix, seed)
self.config = {
"lr": 8.0505e-05,
"beta1": 0.851436,
"beta2": 0.999689,
"amsgrad": True,
}
self.example_input_array = torch.rand(1, 3, 32, 32)
        self.criterion = torch.nn.CrossEntropyLoss()
self.conv1 = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, padding=1),
torch.nn.BatchNorm2d(64),
torch.nn.ReLU(inplace=True),
)
self.conv2 = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),
torch.nn.BatchNorm2d(128),
torch.nn.ReLU(inplace=True),
torch.nn.MaxPool2d(2),
)
self.res1 = torch.nn.Sequential(
torch.nn.Sequential(
torch.nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
torch.nn.BatchNorm2d(128),
torch.nn.ReLU(inplace=True),
),
torch.nn.Sequential(
torch.nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
torch.nn.BatchNorm2d(128),
torch.nn.ReLU(inplace=True),
),
)
self.conv3 = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),
torch.nn.BatchNorm2d(256),
torch.nn.ReLU(inplace=True),
torch.nn.MaxPool2d(2),
)
self.conv4 = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, padding=1),
torch.nn.BatchNorm2d(512),
torch.nn.ReLU(inplace=True),
torch.nn.MaxPool2d(2),
)
self.res2 = torch.nn.Sequential(
torch.nn.Sequential(
torch.nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),
torch.nn.BatchNorm2d(512),
torch.nn.ReLU(inplace=True),
),
torch.nn.Sequential(
torch.nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),
torch.nn.BatchNorm2d(512),
torch.nn.ReLU(inplace=True),
),
)
self.classifier = torch.nn.Sequential(torch.nn.MaxPool2d(4), torch.nn.Flatten(), torch.nn.Linear(512, self.num_classes))
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.res1(x) + x
x = self.conv3(x)
x = self.conv4(x)
x = self.res2(x) + x
x = self.classifier(x)
return x
def configure_optimizers(self):
return torch.optim.Adam(
self.parameters(),
lr=self.config["lr"],
betas=(self.config["beta1"], self.config["beta2"]),
amsgrad=self.config["amsgrad"],
)
| 3,310 | Python | .py | 85 | 27.682353 | 128 | 0.55929 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,987 | nebuladataset.py | enriquetomasmb_nebula/nebula/core/datasets/nebuladataset.py | from abc import ABC, abstractmethod
from collections import defaultdict
import time
import numpy as np
from sklearn.manifold import TSNE
from torch.utils.data import Dataset
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
matplotlib.use("Agg")
plt.switch_backend("Agg")
from nebula.core.utils.deterministic import enable_deterministic
import logging
from nebula.config.config import TRAINING_LOGGER
logging_training = logging.getLogger(TRAINING_LOGGER)
class NebulaDataset(Dataset, ABC):
"""
Abstract class for a partitioned dataset.
Classes inheriting from this class need to implement specific methods
for loading and partitioning the dataset.
"""
def __init__(
self,
num_classes=10,
partition_id=0,
partitions_number=1,
batch_size=32,
num_workers=4,
iid=True,
partition="dirichlet",
partition_parameter=0.5,
seed=42,
config=None,
):
super().__init__()
if partition_id < 0 or partition_id >= partitions_number:
raise ValueError(f"partition_id {partition_id} is out of range for partitions_number {partitions_number}")
self.num_classes = num_classes
self.partition_id = partition_id
self.partitions_number = partitions_number
self.batch_size = batch_size
self.num_workers = num_workers
self.iid = iid
self.partition = partition
self.partition_parameter = partition_parameter
self.seed = seed
self.config = config
self.train_set = None
self.train_indices_map = None
self.test_set = None
self.test_indices_map = None
# Classes of the participants to be sure that the same classes are used in training and testing
self.class_distribution = None
enable_deterministic(config)
if self.partition_id == 0:
self.initialize_dataset()
else:
max_tries = 10
for i in range(max_tries):
try:
self.initialize_dataset()
break
except Exception as e:
logging_training.info(f"Error loading dataset: {e}. Retrying {i+1}/{max_tries} in 5 seconds...")
time.sleep(5)
@abstractmethod
def initialize_dataset(self):
"""
Initialize the dataset. This should load or create the dataset.
"""
pass
@abstractmethod
def generate_non_iid_map(self, dataset, partition="dirichlet", plot=False):
"""
Create a non-iid map of the dataset.
"""
pass
@abstractmethod
def generate_iid_map(self, dataset, plot=False):
"""
Create an iid map of the dataset.
"""
pass
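    # Hedged sketch (not part of the original class): a minimal concrete subclass is expected
    # to set train_set/test_set and the index maps, for example:
    #
    #   class MyNebulaDataset(NebulaDataset):
    #       def initialize_dataset(self):
    #           self.train_set = load_train_split()   # hypothetical loader
    #           self.test_set = load_test_split()     # hypothetical loader
    #           mapping = (self.generate_iid_map(self.train_set) if self.iid
    #                      else self.generate_non_iid_map(self.train_set, self.partition))
    #           self.train_indices_map = mapping[self.partition_id]
    #
    #       def generate_non_iid_map(self, dataset, partition="dirichlet", plot=False):
    #           return self.dirichlet_partition(dataset, alpha=self.partition_parameter)
    #
    #       def generate_iid_map(self, dataset, plot=False):
    #           return self.balanced_iid_partition(dataset)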
def get_train_labels(self):
"""
Get the labels of the training set based on the indices map.
"""
if self.train_indices_map is None:
return None
return [self.train_set.targets[idx] for idx in self.train_indices_map]
def get_test_labels(self):
"""
Get the labels of the test set based on the indices map.
"""
if self.test_indices_map is None:
return None
return [self.test_set.targets[idx] for idx in self.test_indices_map]
def get_local_test_labels(self):
"""
Get the labels of the local test set based on the indices map.
"""
if self.local_test_indices_map is None:
return None
return [self.test_set.targets[idx] for idx in self.local_test_indices_map]
def plot_data_distribution(self, dataset, partitions_map):
"""
        Plot the data distribution of the dataset according to the partitions map provided.
Args:
dataset: The dataset to plot (torch.utils.data.Dataset).
partitions_map: The map of the dataset partitions.
"""
# Plot the data distribution of the dataset, one graph per partition
sns.set()
sns.set_style("whitegrid", {"axes.grid": False})
sns.set_context("paper", font_scale=1.5)
sns.set_palette("Set2")
for i in range(self.partitions_number):
indices = partitions_map[i]
class_counts = [0] * self.num_classes
for idx in indices:
label = dataset.targets[idx]
class_counts[label] += 1
logging_training.info(f"Participant {i+1} class distribution: {class_counts}")
plt.figure()
plt.bar(range(self.num_classes), class_counts)
plt.xlabel("Class")
plt.ylabel("Number of samples")
plt.xticks(range(self.num_classes))
if self.iid:
plt.title(f"Participant {i+1} class distribution (IID)")
else:
plt.title(f"Participant {i+1} class distribution (Non-IID - {self.partition}) - {self.partition_parameter}")
plt.tight_layout()
path_to_save = f"{self.config.participant['tracking_args']['log_dir']}/{self.config.participant['scenario_args']['name']}/participant_{i}_class_distribution_{'iid' if self.iid else 'non_iid'}{'_' + self.partition if not self.iid else ''}.png"
plt.savefig(path_to_save, dpi=300, bbox_inches="tight")
plt.close()
plt.figure()
max_point_size = 500
min_point_size = 0
for i in range(self.partitions_number):
class_counts = [0] * self.num_classes
indices = partitions_map[i]
for idx in indices:
label = dataset.targets[idx]
class_counts[label] += 1
# Normalize the point sizes for this partition
max_samples_partition = max(class_counts)
sizes = [(size / max_samples_partition) * (max_point_size - min_point_size) + min_point_size for size in class_counts]
plt.scatter([i] * self.num_classes, range(self.num_classes), s=sizes, alpha=0.5)
plt.xlabel("Participant")
plt.ylabel("Class")
plt.xticks(range(self.partitions_number))
plt.yticks(range(self.num_classes))
        if self.iid:
            plt.title("Class distribution across participants (IID)")
        else:
            plt.title(f"Class distribution across participants (Non-IID - {self.partition}) - {self.partition_parameter}")
plt.tight_layout()
# Saves the distribution display with circles of different size
path_to_save = f"{self.config.participant['tracking_args']['log_dir']}/{self.config.participant['scenario_args']['name']}/class_distribution_{'iid' if self.iid else 'non_iid'}{'_' + self.partition if not self.iid else ''}.png"
plt.savefig(path_to_save, dpi=300, bbox_inches="tight")
plt.close()
if hasattr(self, "tsne") and self.tsne:
self.visualize_tsne(dataset)
def visualize_tsne(self, dataset):
X = [] # List for storing the characteristics of the samples
        y = []  # List for storing the labels of the samples
for idx in range(len(dataset)): # Assuming that 'dataset' is a list or array of your samples
sample, label = dataset[idx]
X.append(sample.flatten())
y.append(label)
X = np.array(X)
y = np.array(y)
tsne = TSNE(n_components=2, verbose=1, perplexity=40, n_iter=300)
tsne_results = tsne.fit_transform(X)
plt.figure(figsize=(16, 10))
sns.scatterplot(x=tsne_results[:, 0], y=tsne_results[:, 1], hue=y, palette=sns.color_palette("hsv", self.num_classes), legend="full", alpha=0.7)
plt.title("t-SNE visualization of the dataset")
plt.xlabel("t-SNE axis 1")
plt.ylabel("t-SNE axis 2")
plt.legend(title="Class")
plt.tight_layout()
path_to_save_tsne = f"{self.config.participant['tracking_args']['log_dir']}/{self.config.participant['scenario_args']['name']}/tsne_visualization.png"
plt.savefig(path_to_save_tsne, dpi=300, bbox_inches="tight")
plt.close()
def dirichlet_partition(self, dataset, alpha=0.5, min_samples_per_class=10):
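        """
        Partition the dataset by drawing per-class Dirichlet(alpha) proportions.
        For every label, the (possibly cached) proportion vector decides how that label's
        shuffled indices are split across participants; splits with fewer than
        min_samples_per_class indices are discarded for that participant.
        Returns a dict mapping partition index to a list of sample indices.
        """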
y_data = self._get_targets(dataset)
unique_labels = np.unique(y_data)
logging_training.info(f"Labels unique: {unique_labels}")
num_samples = len(y_data)
indices_per_partition = [[] for _ in range(self.partitions_number)]
label_distribution = self.class_distribution if self.class_distribution is not None else None
for label in unique_labels:
label_indices = np.where(y_data == label)[0]
np.random.shuffle(label_indices)
if label_distribution is None:
proportions = np.random.dirichlet([alpha] * self.partitions_number)
else:
proportions = label_distribution[label]
proportions = self._adjust_proportions(proportions, indices_per_partition, num_samples)
split_points = (np.cumsum(proportions) * len(label_indices)).astype(int)[:-1]
for partition_idx, indices in enumerate(np.split(label_indices, split_points)):
if len(indices) < min_samples_per_class:
indices_per_partition[partition_idx].extend([])
else:
indices_per_partition[partition_idx].extend(indices)
if label_distribution is None:
self.class_distribution = self._calculate_class_distribution(indices_per_partition, y_data)
return {i: indices for i, indices in enumerate(indices_per_partition)}
def _adjust_proportions(self, proportions, indices_per_partition, num_samples):
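        """
        Zero the proportion of any partition that already holds at least an equal share
        (num_samples / partitions_number) of the data and renormalize the remainder,
        so oversized partitions stop receiving new indices.
        """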
adjusted = np.array([p * (len(indices) < num_samples / self.partitions_number) for p, indices in zip(proportions, indices_per_partition)])
return adjusted / adjusted.sum()
def _calculate_class_distribution(self, indices_per_partition, y_data):
distribution = defaultdict(lambda: np.zeros(self.partitions_number))
for partition_idx, indices in enumerate(indices_per_partition):
labels, counts = np.unique(y_data[indices], return_counts=True)
for label, count in zip(labels, counts):
distribution[label][partition_idx] = count
return {k: v / v.sum() for k, v in distribution.items()}
@staticmethod
def _get_targets(dataset) -> np.ndarray:
if isinstance(dataset.targets, np.ndarray):
return dataset.targets
elif hasattr(dataset.targets, "numpy"):
return dataset.targets.numpy()
else:
return np.asarray(dataset.targets)
def homo_partition(self, dataset):
"""
Homogeneously partition the dataset into multiple subsets.
This function divides a dataset into a specified number of subsets, where each subset
is intended to have a roughly equal number of samples. This method aims to ensure a
homogeneous distribution of data across all subsets. It's particularly useful in
scenarios where a uniform distribution of data is desired among all federated learning
clients.
Args:
dataset (torch.utils.data.Dataset): The dataset to partition. It should have
'data' and 'targets' attributes.
Returns:
dict: A dictionary where keys are subset indices (ranging from 0 to partitions_number-1)
and values are lists of indices corresponding to the samples in each subset.
The function randomly shuffles the entire dataset and then splits it into the number
of subsets specified by `partitions_number`. It ensures that each subset has a similar number
of samples. The function also prints the class distribution in each subset for reference.
Example usage:
federated_data = homo_partition(my_dataset)
# This creates federated data subsets with homogeneous distribution.
"""
n_nets = self.partitions_number
n_train = len(dataset.targets)
np.random.seed(self.seed)
idxs = np.random.permutation(n_train)
batch_idxs = np.array_split(idxs, n_nets)
net_dataidx_map = {i: batch_idxs[i] for i in range(n_nets)}
# partitioned_datasets = []
for i in range(self.partitions_number):
# subset = torch.utils.data.Subset(dataset, net_dataidx_map[i])
# partitioned_datasets.append(subset)
# Print class distribution in the current partition
class_counts = [0] * self.num_classes
for idx in net_dataidx_map[i]:
label = dataset.targets[idx]
class_counts[label] += 1
logging_training.info(f"Partition {i+1} class distribution: {class_counts}")
return net_dataidx_map
def balanced_iid_partition(self, dataset):
"""
Partition the dataset into balanced and IID (Independent and Identically Distributed)
subsets for each client.
This function divides a dataset into a specified number of subsets (federated clients),
where each subset has an equal class distribution. This makes the partition suitable for
simulating IID data scenarios in federated learning.
Args:
dataset (list): The dataset to partition. It should be a list of tuples where each
tuple represents a data sample and its corresponding label.
Returns:
dict: A dictionary where keys are client IDs (ranging from 0 to partitions_number-1) and
values are lists of indices corresponding to the samples assigned to each client.
The function ensures that each class is represented equally in each subset. The
partitioning process involves iterating over each class, shuffling the indices of that class,
and then splitting them equally among the clients. The function does not print the class
distribution in each subset.
Example usage:
federated_data = balanced_iid_partition(my_dataset)
# This creates federated data subsets with equal class distributions.
"""
num_clients = self.partitions_number
clients_data = {i: [] for i in range(num_clients)}
# Get the labels from the dataset
if isinstance(dataset.targets, np.ndarray):
labels = dataset.targets
elif hasattr(dataset.targets, "numpy"): # Check if it's a tensor with .numpy() method
labels = dataset.targets.numpy()
else: # If it's a list
labels = np.asarray(dataset.targets)
label_counts = np.bincount(labels)
min_label = label_counts.argmin()
min_count = label_counts[min_label]
for label in range(self.num_classes):
# Get the indices of the same label samples
label_indices = np.where(labels == label)[0]
np.random.seed(self.seed)
np.random.shuffle(label_indices)
# Split the data based on their labels
samples_per_client = min_count // num_clients
for i in range(num_clients):
start_idx = i * samples_per_client
end_idx = (i + 1) * samples_per_client
clients_data[i].extend(label_indices[start_idx:end_idx])
return clients_data
def unbalanced_iid_partition(self, dataset, imbalance_factor=2):
"""
Partition the dataset into multiple IID (Independent and Identically Distributed)
subsets with different size.
This function divides a dataset into a specified number of IID subsets (federated
clients), where each subset has a different number of samples. The number of samples
in each subset is determined by an imbalance factor, making the partition suitable
for simulating imbalanced data scenarios in federated learning.
Args:
dataset (list): The dataset to partition. It should be a list of tuples where
each tuple represents a data sample and its corresponding label.
imbalance_factor (float): The factor to determine the degree of imbalance
among the subsets. A lower imbalance factor leads to more
imbalanced partitions.
Returns:
dict: A dictionary where keys are client IDs (ranging from 0 to partitions_number-1) and
values are lists of indices corresponding to the samples assigned to each client.
The function ensures that each class is represented in each subset but with varying
proportions. The partitioning process involves iterating over each class, shuffling
the indices of that class, and then splitting them according to the calculated subset
sizes. The function does not print the class distribution in each subset.
Example usage:
federated_data = unbalanced_iid_partition(my_dataset, imbalance_factor=2)
# This creates federated data subsets with varying number of samples based on
# an imbalance factor of 2.
"""
num_clients = self.partitions_number
clients_data = {i: [] for i in range(num_clients)}
# Get the labels from the dataset
labels = np.array([dataset.targets[idx] for idx in range(len(dataset))])
label_counts = np.bincount(labels)
min_label = label_counts.argmin()
min_count = label_counts[min_label]
# Set the initial_subset_size
initial_subset_size = min_count // num_clients
# Calculate the number of samples for each subset based on the imbalance factor
subset_sizes = [initial_subset_size]
for i in range(1, num_clients):
subset_sizes.append(int(subset_sizes[i - 1] * ((imbalance_factor - 1) / imbalance_factor)))
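        # Illustrative arithmetic: with imbalance_factor=2 each client gets half the previous
        # client's share (e.g. 1000, 500, 250, ...); larger factors make the decay gentler.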
for label in range(self.num_classes):
# Get the indices of the same label samples
label_indices = np.where(labels == label)[0]
np.random.seed(self.seed)
np.random.shuffle(label_indices)
# Split the data based on their labels
start = 0
for i in range(num_clients):
end = start + subset_sizes[i]
clients_data[i].extend(label_indices[start:end])
start = end
return clients_data
def percentage_partition(self, dataset, percentage=20):
"""
Partition a dataset into multiple subsets with a specified level of non-IID-ness.
This function divides a dataset into a specified number of subsets (federated
clients), where each subset has a different class distribution. The class
distribution in each subset is determined by a specified percentage, making the
partition suitable for simulating non-IID (non-Independently and Identically
Distributed) data scenarios in federated learning.
Args:
dataset (torch.utils.data.Dataset): The dataset to partition. It should have
'data' and 'targets' attributes.
percentage (int): A value between 0 and 100 that specifies the desired
level of non-IID-ness for the labels of the federated data.
This percentage controls the imbalance in the class distribution
across different subsets.
Returns:
dict: A dictionary where keys are subset indices (ranging from 0 to partitions_number-1)
and values are lists of indices corresponding to the samples in each subset.
The function ensures that the number of classes in each subset varies based on the selected
percentage. The partitioning process involves iterating over each class, shuffling the
indices of that class, and then splitting them according to the calculated subset sizes.
        The function also logs the class distribution in each subset for reference.
Example usage:
federated_data = percentage_partition(my_dataset, percentage=20)
# This creates federated data subsets with varying class distributions based on
# a percentage of 20.
"""
if isinstance(dataset.targets, np.ndarray):
y_train = dataset.targets
elif hasattr(dataset.targets, "numpy"): # Check if it's a tensor with .numpy() method
y_train = dataset.targets.numpy()
else: # If it's a list
y_train = np.asarray(dataset.targets)
num_classes = self.num_classes
num_subsets = self.partitions_number
class_indices = {i: np.where(y_train == i)[0] for i in range(num_classes)}
# Get the labels from the dataset
labels = np.array([dataset.targets[idx] for idx in range(len(dataset))])
label_counts = np.bincount(labels)
min_label = label_counts.argmin()
min_count = label_counts[min_label]
classes_per_subset = int(num_classes * percentage / 100)
if classes_per_subset < 1:
raise ValueError("The percentage is too low to assign at least one class to each subset.")
subset_indices = [[] for _ in range(num_subsets)]
class_list = list(range(num_classes))
np.random.seed(self.seed)
np.random.shuffle(class_list)
for i in range(num_subsets):
for j in range(classes_per_subset):
# Use modulo operation to cycle through the class_list
class_idx = class_list[(i * classes_per_subset + j) % num_classes]
indices = class_indices[class_idx]
np.random.seed(self.seed)
np.random.shuffle(indices)
                # Take min_count // 2 samples (half of the rarest class size) from this class
subset_indices[i].extend(indices[: min_count // 2])
class_counts = np.bincount(np.array([dataset.targets[idx] for idx in subset_indices[i]]))
logging_training.info(f"Partition {i+1} class distribution: {class_counts.tolist()}")
partitioned_datasets = {i: subset_indices[i] for i in range(num_subsets)}
return partitioned_datasets
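    # Illustrative sketch (not part of the original implementation): with
    # num_classes = 10, partitions_number = 4 and percentage = 20, each subset gets
    # classes_per_subset = int(10 * 20 / 100) = 2 classes, assigned by cycling through
    # the shuffled class list with a modulo index:
    #
    #     class_list = [3, 7, 1, 9, 0, 4, 8, 2, 6, 5]   # hypothetical shuffle
    #     # subset 0 -> classes [3, 7], subset 1 -> [1, 9],
    #     # subset 2 -> [0, 4],        subset 3 -> [8, 2]
    #
    # A lower percentage therefore means fewer classes per client, i.e. stronger non-IID-ness.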
def plot_all_data_distribution(self, dataset, partitions_map):
"""
        Plot the data distribution of the dataset across all partitions according to the partitions map provided.
Args:
dataset: The dataset to plot (torch.utils.data.Dataset).
partitions_map: The map of the dataset partitions.
"""
sns.set()
sns.set_style("whitegrid", {"axes.grid": False})
sns.set_context("paper", font_scale=1.5)
sns.set_palette("Set2")
num_clients = len(partitions_map)
num_classes = self.num_classes
plt.figure(figsize=(12, 8))
label_distribution = [[] for _ in range(num_classes)]
for c_id, idc in partitions_map.items():
for idx in idc:
label_distribution[dataset.targets[idx]].append(c_id)
plt.hist(label_distribution, stacked=True, bins=np.arange(-0.5, num_clients + 1.5, 1), label=dataset.classes, rwidth=0.5)
        plt.xticks(np.arange(num_clients), [f"Participant {c_id + 1}" for c_id in range(num_clients)])
        plt.title("Distribution of split datasets")
        plt.xlabel("Participant")
        plt.ylabel("Number of samples")
plt.legend(loc="upper right")
plt.tight_layout()
path_to_save = f"{self.config.participant['tracking_args']['log_dir']}/{self.config.participant['scenario_args']['name']}/all_data_distribution_{'iid' if self.iid else 'non_iid'}{'_' + self.partition if not self.iid else ''}.png"
plt.savefig(path_to_save, dpi=300, bbox_inches="tight")
plt.close()
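    # Illustrative sketch (not part of the original implementation): label_distribution
    # above holds one list per class, containing a client id once for every sample of
    # that class owned by that client. For two clients and two classes:
    #
    #     partitions_map = {0: [0, 1], 1: [2, 3]}   # hypothetical sample indices
    #     targets = [0, 1, 1, 0]                    # labels of those samples
    #     # label_distribution == [[0, 1], [0, 1]]
    #
    # plt.hist(..., stacked=True) then stacks the per-class counts for each participant.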
| 24,115 | Python | .py | 452 | 42.424779 | 254 | 0.637903 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,988 | changeablesubset.py | enriquetomasmb_nebula/nebula/core/datasets/changeablesubset.py | import copy
from torch.utils.data import Subset
from nebula.addons.attacks.poisoning.datapoison import datapoison
from nebula.addons.attacks.poisoning.labelflipping import labelFlipping
class ChangeableSubset(Subset):
def __init__(self, dataset, indices, label_flipping=False, data_poisoning=False, poisoned_persent=0, poisoned_ratio=0, targeted=False, target_label=0, target_changed_label=0, noise_type="salt"):
super().__init__(dataset, indices)
new_dataset = dataset
self.dataset = new_dataset
self.indices = indices
self.label_flipping = label_flipping
self.data_poisoning = data_poisoning
self.poisoned_persent = poisoned_persent
self.poisoned_ratio = poisoned_ratio
self.targeted = targeted
self.target_label = target_label
self.target_changed_label = target_changed_label
self.noise_type = noise_type
if self.label_flipping:
self.dataset = labelFlipping(self.dataset, self.indices, self.poisoned_persent, self.targeted, self.target_label, self.target_changed_label)
if self.data_poisoning:
self.dataset = datapoison(self.dataset, self.indices, self.poisoned_persent, self.poisoned_ratio, self.targeted, self.target_label, self.noise_type)
def __getitem__(self, idx):
if isinstance(idx, list):
return self.dataset[[self.indices[i] for i in idx]]
return self.dataset[self.indices[idx]]
def __len__(self):
return len(self.indices)
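# Minimal usage sketch (not part of the original module). It assumes a torchvision-style
# dataset; with the attack flags left at their defaults the wrapper behaves like a plain
# torch Subset, while label_flipping=True or data_poisoning=True would route the data
# through the poisoning helpers imported above.
if __name__ == "__main__":
    from torchvision import transforms
    from torchvision.datasets import MNIST
    train_set = MNIST("data", train=True, download=True, transform=transforms.ToTensor())
    benign = ChangeableSubset(train_set, indices=list(range(100)))
    img, label = benign[0]
    print(len(benign), img.shape, label)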
| 1,526 | Python | .py | 28 | 46.678571 | 198 | 0.706908 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,989 | datamodule.py | enriquetomasmb_nebula/nebula/core/datasets/datamodule.py | import logging
import torch
from lightning import LightningDataModule
from torch.utils.data import DataLoader, random_split, RandomSampler
from nebula.core.datasets.changeablesubset import ChangeableSubset
from nebula.config.config import TRAINING_LOGGER
logging_training = logging.getLogger(TRAINING_LOGGER)
class DataModule(LightningDataModule):
def __init__(
self,
train_set,
train_set_indices,
test_set,
test_set_indices,
local_test_set_indices,
partition_id=0,
partitions_number=1,
batch_size=32,
num_workers=0,
val_percent=0.1,
label_flipping=False,
data_poisoning=False,
poisoned_persent=0,
poisoned_ratio=0,
targeted=False,
target_label=0,
target_changed_label=0,
noise_type="salt",
seed=42
):
super().__init__()
self.train_set = train_set
self.train_set_indices = train_set_indices
self.test_set = test_set
self.test_set_indices = test_set_indices
self.local_test_set_indices = local_test_set_indices
self.partition_id = partition_id
self.partitions_number = partitions_number
self.batch_size = batch_size
self.num_workers = num_workers
self.val_percent = val_percent
self.label_flipping = label_flipping
self.data_poisoning = data_poisoning
self.poisoned_persent = poisoned_persent
self.poisoned_ratio = poisoned_ratio
self.targeted = targeted
self.target_label = target_label
self.target_changed_label = target_changed_label
self.noise_type = noise_type
self.seed = seed
self.model_weight = None
self.val_indices = None
# Split train and validation datasets
self.data_train = None
self.data_val = None
self.global_te_subset = None
self.local_te_subset = None
def setup(self, stage=None):
if stage in (None, 'fit'):
tr_subset = ChangeableSubset(
self.train_set,
self.train_set_indices,
label_flipping=self.label_flipping,
data_poisoning=self.data_poisoning,
poisoned_persent=self.poisoned_persent,
poisoned_ratio=self.poisoned_ratio,
targeted=self.targeted,
target_label=self.target_label,
target_changed_label=self.target_changed_label,
noise_type=self.noise_type,
)
if self.val_indices is None:
generator = torch.Generator()
generator.manual_seed(self.seed)
train_size = round(len(tr_subset) * (1 - self.val_percent))
val_size = len(tr_subset) - train_size
self.data_train, self.data_val = random_split(tr_subset, [train_size, val_size], generator=generator)
self.val_indices = self.data_val.indices
else:
train_indices = list(set(range(len(tr_subset))) - set(self.val_indices))
val_indices = self.val_indices
self.data_train = ChangeableSubset(tr_subset, train_indices)
self.data_val = ChangeableSubset(tr_subset, val_indices)
self.model_weight = len(self.data_train)
if stage in (None, 'test'):
# Test sets
self.global_te_subset = ChangeableSubset(self.test_set, self.test_set_indices)
self.local_te_subset = ChangeableSubset(self.test_set, self.local_test_set_indices)
if len(self.test_set) < self.partitions_number:
raise ValueError("Too many partitions for the size of the test set.")
def teardown(self, stage=None):
# Teardown the datasets
if stage in (None, 'fit'):
self.data_train = None
self.data_val = None
if stage in (None, 'test'):
self.global_te_subset = None
self.local_te_subset = None
def train_dataloader(self):
if self.data_train is None:
raise ValueError("Train dataset not initialized. Please call setup('fit') before requesting train_dataloader.")
logging_training.info("Train set size: {}".format(len(self.data_train)))
return DataLoader(
self.data_train,
batch_size=self.batch_size,
shuffle=True,
num_workers=self.num_workers,
drop_last=True,
pin_memory=False,
)
def val_dataloader(self):
if self.data_val is None:
raise ValueError("Validation dataset not initialized. Please call setup('fit') before requesting val_dataloader.")
logging_training.info("Validation set size: {}".format(len(self.data_val)))
return DataLoader(
self.data_val,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
drop_last=True,
pin_memory=False,
)
def test_dataloader(self):
if self.local_te_subset is None or self.global_te_subset is None:
raise ValueError("Test datasets not initialized. Please call setup('test') before requesting test_dataloader.")
logging_training.info("Local test set size: {}".format(len(self.local_te_subset)))
logging_training.info("Global test set size: {}".format(len(self.global_te_subset)))
return [
DataLoader(
self.local_te_subset,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
drop_last=True,
pin_memory=False,
),
DataLoader(
self.global_te_subset,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
drop_last=True,
pin_memory=False,
),
]
def bootstrap_dataloader(self):
if self.data_val is None:
raise ValueError("Validation dataset not initialized. Please call setup('fit') before requesting bootstrap_dataloader.")
random_sampler = RandomSampler(
data_source=self.data_val, replacement=False, num_samples=max(int(len(self.data_val) / 3), 300)
)
logging_training.info("Bootstrap samples: {}".format(len(random_sampler)))
        # Bootstrap batches are drawn from the validation split, matching the sampler above
        return DataLoader(
            self.data_val,
            batch_size=self.batch_size,
            shuffle=False,
            sampler=random_sampler,
            num_workers=self.num_workers,
            drop_last=True,
            pin_memory=False,
        )
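# Minimal usage sketch (not part of the original module), assuming torchvision datasets
# and illustrative index lists; in NEBULA these indices would normally come from a
# NebulaDataset partitioner.
if __name__ == "__main__":
    from torchvision import transforms
    from torchvision.datasets import MNIST
    train_set = MNIST("data", train=True, download=True, transform=transforms.ToTensor())
    test_set = MNIST("data", train=False, download=True, transform=transforms.ToTensor())
    dm = DataModule(
        train_set=train_set,
        train_set_indices=list(range(1000)),
        test_set=test_set,
        test_set_indices=list(range(len(test_set))),
        local_test_set_indices=list(range(500)),
        batch_size=32,
    )
    dm.setup("fit")
    dm.setup("test")
    images, labels = next(iter(dm.train_dataloader()))
    print(images.shape, labels.shape)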
| 6,820 | Python | .py | 161 | 30.745342 | 132 | 0.602283 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,990 | mnist.py | enriquetomasmb_nebula/nebula/core/datasets/mnist/mnist.py | import os
from nebula.core.datasets.nebuladataset import NebulaDataset
from torchvision import transforms
from torchvision.datasets import MNIST
class MNISTDataset(NebulaDataset):
def __init__(
self,
num_classes=10,
partition_id=0,
partitions_number=1,
batch_size=32,
num_workers=4,
iid=True,
partition="dirichlet",
partition_parameter=0.5,
seed=42,
config=None,
):
super().__init__(
num_classes=num_classes,
partition_id=partition_id,
partitions_number=partitions_number,
batch_size=batch_size,
num_workers=num_workers,
iid=iid,
partition=partition,
partition_parameter=partition_parameter,
seed=seed,
config=config,
)
if partition_id < 0 or partition_id >= partitions_number:
raise ValueError(f"partition_id {partition_id} is out of range for partitions_number {partitions_number}")
def initialize_dataset(self):
if self.train_set is None:
self.train_set = self.load_mnist_dataset(train=True)
if self.test_set is None:
self.test_set = self.load_mnist_dataset(train=False)
self.test_indices_map = list(range(len(self.test_set)))
# Depending on the iid flag, generate a non-iid or iid map of the train set
if self.iid:
self.train_indices_map = self.generate_iid_map(self.train_set, self.partition, self.partition_parameter)
self.local_test_indices_map = self.generate_iid_map(self.test_set, self.partition, self.partition_parameter)
else:
self.train_indices_map = self.generate_non_iid_map(self.train_set, self.partition, self.partition_parameter)
self.local_test_indices_map = self.generate_non_iid_map(self.test_set, self.partition, self.partition_parameter)
print(f"Length of train indices map: {len(self.train_indices_map)}")
print(f"Lenght of test indices map (global): {len(self.test_indices_map)}")
print(f"Length of test indices map (local): {len(self.local_test_indices_map)}")
def load_mnist_dataset(self, train=True):
apply_transforms = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,), inplace=True)])
data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
os.makedirs(data_dir, exist_ok=True)
return MNIST(
data_dir,
train=train,
download=True,
transform=apply_transforms,
)
def generate_non_iid_map(self, dataset, partition="dirichlet", partition_parameter=0.5):
if partition == "dirichlet":
partitions_map = self.dirichlet_partition(dataset, alpha=partition_parameter)
elif partition == "percent":
partitions_map = self.percentage_partition(dataset, percentage=partition_parameter)
else:
raise ValueError(f"Partition {partition} is not supported for Non-IID map")
if self.partition_id == 0:
self.plot_data_distribution(dataset, partitions_map)
self.plot_all_data_distribution(dataset, partitions_map)
return partitions_map[self.partition_id]
def generate_iid_map(self, dataset, partition="balancediid", partition_parameter=2):
if partition == "balancediid":
partitions_map = self.balanced_iid_partition(dataset)
elif partition == "unbalancediid":
partitions_map = self.unbalanced_iid_partition(dataset, imbalance_factor=partition_parameter)
else:
raise ValueError(f"Partition {partition} is not supported for IID map")
if self.partition_id == 0:
self.plot_data_distribution(dataset, partitions_map)
self.plot_all_data_distribution(dataset, partitions_map)
return partitions_map[self.partition_id]
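# Minimal usage sketch (not part of the original module). It assumes the NebulaDataset
# base class can be constructed without a full NEBULA config and that initialize_dataset()
# is invoked explicitly; partition_id=1 is used so that no distribution plots (which
# require config paths) are generated.
if __name__ == "__main__":
    mnist_partition = MNISTDataset(partition_id=1, partitions_number=2, iid=True, partition="balancediid")
    mnist_partition.initialize_dataset()
    print(len(mnist_partition.train_indices_map), len(mnist_partition.local_test_indices_map))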
| 3,988 | Python | .py | 80 | 39.7875 | 124 | 0.654906 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,991 | mnist.py | enriquetomasmb_nebula/nebula/core/datasets/mnistML/mnist.py | import os
import sys
import numpy as np
from sklearn.model_selection import train_test_split
from torchvision import transforms
from torchvision.datasets import MNIST
class MNISTDatasetScikit:
mnist_train = None
mnist_val = None
def __init__(self, partition_id=0, partitions_number=1, iid=True):
self.partition_id = partition_id
self.partitions_number = partitions_number
self.iid = iid
data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
os.makedirs(data_dir, exist_ok=True)
if MNISTDatasetScikit.mnist_train is None:
MNISTDatasetScikit.mnist_train = MNIST(
data_dir,
train=True,
download=True,
transform=transforms.ToTensor(),
)
if not iid:
sorted_indexes = MNISTDatasetScikit.mnist_train.targets.sort()[1]
MNISTDatasetScikit.mnist_train.targets = MNISTDatasetScikit.mnist_train.targets[sorted_indexes]
MNISTDatasetScikit.mnist_train.data = MNISTDatasetScikit.mnist_train.data[sorted_indexes]
if MNISTDatasetScikit.mnist_val is None:
MNISTDatasetScikit.mnist_val = MNIST(
data_dir,
train=False,
download=True,
transform=transforms.ToTensor(),
)
if not iid:
sorted_indexes = MNISTDatasetScikit.mnist_val.targets.sort()[1]
MNISTDatasetScikit.mnist_val.targets = MNISTDatasetScikit.mnist_val.targets[sorted_indexes]
MNISTDatasetScikit.mnist_val.data = MNISTDatasetScikit.mnist_val.data[sorted_indexes]
self.train_set = MNISTDatasetScikit.mnist_train
self.test_set = MNISTDatasetScikit.mnist_val
def train_dataloader(self):
X_train = self.train_set.data.numpy().reshape(-1, 28 * 28)
y_train = self.train_set.targets.numpy()
return X_train, y_train
def test_dataloader(self):
X_test = self.test_set.data.numpy().reshape(-1, 28 * 28)
y_test = self.test_set.targets.numpy()
return X_test, y_test
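# Minimal usage sketch (not part of the original module): the scikit-learn variant exposes
# flat numpy arrays, so any sklearn estimator can consume them directly. The subsampling
# to 5000 training samples is only to keep the example fast.
if __name__ == "__main__":
    from sklearn.linear_model import LogisticRegression
    ds = MNISTDatasetScikit(partition_id=0, partitions_number=1, iid=True)
    X_train, y_train = ds.train_dataloader()
    X_test, y_test = ds.test_dataloader()
    clf = LogisticRegression(max_iter=100).fit(X_train[:5000], y_train[:5000])
    print("held-out accuracy:", clf.score(X_test, y_test))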
| 2,161 | Python | .py | 47 | 35.489362 | 111 | 0.643536 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,992 | sentiment140.py | enriquetomasmb_nebula/nebula/core/datasets/sentiment140/sentiment140.py | import os
import sys
from datasets import load_dataset
from torchvision.datasets import MNIST
from sklearn.model_selection import train_test_split
from torchtext import vocab
import pandas as pd
from torch.nn.functional import pad
from string import punctuation
import random
import torch
from nebula.core.datasets.nebuladataset import NebulaDataset
class SENTIMENT140(MNIST):
def __init__(self, train=True):
self.root = f"{sys.path[0]}/data"
self.download = True
self.train = train
super(MNIST, self).__init__(self.root)
self.training_file = f"{self.root}/sentiment140/processed/sentiment140_train.pt"
self.test_file = f"{self.root}/sentiment140/processed/sentiment140_test.pt"
if not os.path.exists(f"{self.root}/sentiment140/processed/sentiment140_test.pt") or not os.path.exists(
f"{self.root}/sentiment140/processed/sentiment140_train.pt"
):
if self.download:
self.dataset_download()
else:
raise RuntimeError("Dataset not found, set parameter download=True to download")
if self.train:
data_file = self.training_file
else:
data_file = self.test_file
data_and_targets = torch.load(data_file)
self.data, self.targets = data_and_targets[0], data_and_targets[1]
def __getitem__(self, index):
img, target = self.data[index], int(self.targets[index])
if self.transform is not None:
img = img
if self.target_transform is not None:
target = target
return img, target
def dataset_download(self):
saved_path = f"{self.root}/sentiment140/processed/"
if not os.path.exists(saved_path):
os.makedirs(saved_path)
dataset = load_dataset("sentiment140")
indices = range(0, len(dataset["train"]))
num_samp = 80000
random_index = random.sample(indices, num_samp)
dataset1 = dataset["train"][random_index]
data_df = pd.DataFrame(dataset1)
data_df["sentiment"] = data_df["sentiment"].replace(to_replace=4, value=1)
vec = vocab.FastText()
tokenlized_text_data = data_df["text"].apply(str.lower).apply(str.split)
table = str.maketrans("", "", punctuation)
tokenlized_text_data = tokenlized_text_data.apply(lambda x: [w.translate(table) for w in x])
tokenlized_text_data = tokenlized_text_data.apply(vec.get_vecs_by_tokens).tolist()
tokenlized_text_data = [pad(i, [0, 0, 0, 64 - i.shape[0]], "constant", 0) for i in tokenlized_text_data]
tokenlized_text_data = torch.stack(tokenlized_text_data)
text_label = torch.Tensor(data_df["sentiment"].tolist())
X_train, X_test, y_train, y_test = train_test_split(tokenlized_text_data, text_label, test_size=0.15, random_state=False)
train = [X_train, y_train]
test = [X_test, y_test]
train_file = f"{self.root}/sentiment140/processed/sentiment140_train.pt"
test_file = f"{self.root}/sentiment140/processed/sentiment140_test.pt"
if not os.path.exists(train_file):
torch.save(train, train_file)
if not os.path.exists(test_file):
torch.save(test, test_file)
class Sentiment140Dataset(NebulaDataset):
def __init__(
self,
num_classes=10,
partition_id=0,
partitions_number=1,
batch_size=32,
num_workers=4,
iid=True,
partition="dirichlet",
partition_parameter=0.5,
seed=42,
config=None,
):
super().__init__(
num_classes=num_classes,
partition_id=partition_id,
partitions_number=partitions_number,
batch_size=batch_size,
num_workers=num_workers,
iid=iid,
partition=partition,
partition_parameter=partition_parameter,
seed=seed,
config=config,
)
def initialize_dataset(self):
# Load sent14 train dataset
if self.train_set is None:
self.train_set = self.load_sent14_dataset(train=True)
if self.test_set is None:
self.test_set = self.load_sent14_dataset(train=False)
# All nodes have the same test set (indices are the same for all nodes)
self.test_indices_map = list(range(len(self.test_set)))
# Depending on the iid flag, generate a non-iid or iid map of the train set
if self.iid:
self.train_indices_map = self.generate_iid_map(self.train_set, self.partition, self.partition_parameter)
self.local_test_indices_map = self.generate_iid_map(self.test_set, self.partition, self.partition_parameter)
else:
self.train_indices_map = self.generate_non_iid_map(self.train_set, self.partition, self.partition_parameter)
self.local_test_indices_map = self.generate_non_iid_map(self.test_set, self.partition, self.partition_parameter)
print(f"Length of train indices map: {len(self.train_indices_map)}")
print(f"Lenght of test indices map (global): {len(self.test_indices_map)}")
print(f"Length of test indices map (local): {len(self.local_test_indices_map)}")
def load_sent14_dataset(self, train=True):
if train:
return SENTIMENT140(train=True)
return SENTIMENT140(train=False)
def generate_non_iid_map(self, dataset, partition="dirichlet", partition_parameter=0.5):
if partition == "dirichlet":
partitions_map = self.dirichlet_partition(dataset, alpha=partition_parameter)
elif partition == "percent":
partitions_map = self.percentage_partition(dataset, percentage=partition_parameter)
else:
raise ValueError(f"Partition {partition} is not supported for Non-IID map")
if self.partition_id == 0:
self.plot_data_distribution(dataset, partitions_map)
self.plot_all_data_distribution(dataset, partitions_map)
return partitions_map[self.partition_id]
def generate_iid_map(self, dataset, partition="balancediid", partition_parameter=2):
if partition == "balancediid":
partitions_map = self.balanced_iid_partition(dataset)
elif partition == "unbalancediid":
partitions_map = self.unbalanced_iid_partition(dataset, imbalance_factor=partition_parameter)
else:
raise ValueError(f"Partition {partition} is not supported for IID map")
if self.partition_id == 0:
self.plot_data_distribution(dataset, partitions_map)
self.plot_all_data_distribution(dataset, partitions_map)
return partitions_map[self.partition_id]
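# Minimal sketch (not part of the original module) of the padding step in
# SENTIMENT140.dataset_download: each tweet is embedded token by token (FastText vectors
# are 300-dimensional) and zero-padded along the token axis to a fixed length of 64.
if __name__ == "__main__":
    fake_embeddings = torch.randn(37, 300)  # 37 tokens, 300-dim vectors
    padded = pad(fake_embeddings, [0, 0, 0, 64 - fake_embeddings.shape[0]], "constant", 0)
    print(padded.shape)  # torch.Size([64, 300])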
| 6,744 | Python | .py | 138 | 39.615942 | 129 | 0.654471 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,993 | syscall.py | enriquetomasmb_nebula/nebula/core/datasets/syscall/syscall.py | import os
import sys
import zipfile
import ast
import pandas as pd
from sklearn.model_selection import train_test_split
from torchvision.datasets import MNIST, utils
import torch
from nebula.core.datasets.nebuladataset import NebulaDataset
class SYSCALL(MNIST):
def __init__(
self,
partition_id,
partitions_number,
root_dir,
train=True,
transform=None,
target_transform=None,
download=False,
):
super().__init__(root_dir, transform=None, target_transform=None)
self.transform = transform
self.target_transform = target_transform
self.partition_id = partition_id
self.partitions_number = partitions_number
self.download = download
self.download_link = "https://files.ifi.uzh.ch/CSG/research/fl/data/syscall.zip"
self.train = train
self.root = root_dir
self.training_file = f"{self.root}/syscall/processed/syscall_train.pt"
self.test_file = f"{self.root}/syscall/processed/syscall_test.pt"
if not os.path.exists(f"{self.root}/syscall/processed/syscall_test.pt") or not os.path.exists(f"{self.root}/syscall/processed/syscall_train.pt"):
if self.download:
self.dataset_download()
self.process()
else:
raise RuntimeError("Dataset not found, set parameter download=True to download")
else:
print("SYSCALL dataset already downloaded and processed.")
if self.train:
data_file = self.training_file
else:
data_file = self.test_file
data_and_targets = torch.load(data_file)
self.data, self.targets = data_and_targets[0], data_and_targets[1]
def __getitem__(self, index):
img, target = self.data[index], int(self.targets[index])
if self.transform is not None:
img = img
if self.target_transform is not None:
target = target
return img, target
def dataset_download(self):
paths = [f"{self.root}/syscall/raw/", f"{self.root}/syscall/processed/"]
for path in paths:
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
print("Downloading SYSCALL dataset...")
filename = self.download_link.split("/")[-1]
utils.download_and_extract_archive(
self.download_link,
download_root=f"{self.root}/syscall/raw/",
filename=filename,
)
with zipfile.ZipFile(f"{self.root}/syscall/raw/{filename}", "r") as zip_ref:
zip_ref.extractall(f"{self.root}/syscall/raw/")
def process(self):
print("Processing SYSCALL dataset...")
df = pd.DataFrame()
files = os.listdir(f"{self.root}/syscall/raw/")
feature_name = "system calls frequency_1gram-scaled"
for f in files:
if ".csv" in f:
fi_path = f"{self.root}/syscall/raw/{f}"
csv_df = pd.read_csv(fi_path, sep="\t")
feature = [ast.literal_eval(i) for i in csv_df[feature_name]]
csv_df[feature_name] = feature
df = pd.concat([df, csv_df])
df["maltype"] = df["maltype"].replace(to_replace="normalv2", value="normal")
classes_to_targets = {}
t = 0
for i in set(df["maltype"]):
classes_to_targets[i] = t
t += 1
classes = list(classes_to_targets.keys())
for c in classes_to_targets:
df["maltype"] = df["maltype"].replace(to_replace=c, value=classes_to_targets[c])
        all_targets = torch.tensor(df["maltype"].tolist())
        all_data = torch.tensor(df[feature_name].tolist())
        x_train, x_test, y_train, y_test = train_test_split(all_data, all_targets, test_size=0.15, random_state=42)
train = [x_train, y_train, classes_to_targets, classes]
test = [x_test, y_test, classes_to_targets, classes]
train_file = f"{self.root}/syscall/processed/syscall_train.pt"
test_file = f"{self.root}/syscall/processed/syscall_test.pt"
if not os.path.exists(train_file):
torch.save(train, train_file)
if not os.path.exists(test_file):
torch.save(test, test_file)
class SYSCALLDataset(NebulaDataset):
def __init__(
self,
num_classes=10,
partition_id=0,
partitions_number=1,
batch_size=32,
num_workers=4,
iid=True,
partition="dirichlet",
partition_parameter=0.5,
seed=42,
config=None,
):
super().__init__(
num_classes=num_classes,
partition_id=partition_id,
partitions_number=partitions_number,
batch_size=batch_size,
num_workers=num_workers,
iid=iid,
partition=partition,
partition_parameter=partition_parameter,
seed=seed,
config=config,
)
def initialize_dataset(self):
# Load syscall train dataset
if self.train_set is None:
self.train_set = self.load_syscall_dataset(train=True)
if self.test_set is None:
self.test_set = self.load_syscall_dataset(train=False)
# All nodes have the same test set (indices are the same for all nodes)
self.test_indices_map = list(range(len(self.test_set)))
# Depending on the iid flag, generate a non-iid or iid map of the train set
if self.iid:
self.train_indices_map = self.generate_iid_map(self.train_set, self.partition, self.partition_parameter)
self.local_test_indices_map = self.generate_iid_map(self.test_set, self.partition, self.partition_parameter)
else:
self.train_indices_map = self.generate_non_iid_map(self.train_set, self.partition, self.partition_parameter)
self.local_test_indices_map = self.generate_non_iid_map(self.test_set, self.partition, self.partition_parameter)
print(f"Length of train indices map: {len(self.train_indices_map)}")
print(f"Lenght of test indices map: {len(self.test_indices_map)}")
def load_syscall_dataset(self, train=True):
if train:
return SYSCALL(
partition_id=self.partition_id,
partitions_number=self.partitions_number,
root_dir=f"{sys.path[0]}/data",
train=True,
)
else:
return SYSCALL(
partition_id=self.partition_id,
partitions_number=self.partitions_number,
root_dir=f"{sys.path[0]}/data",
train=False,
)
def generate_non_iid_map(self, dataset, partition="dirichlet", partition_parameter=0.5):
if partition == "dirichlet":
partitions_map = self.dirichlet_partition(dataset, alpha=partition_parameter)
elif partition == "percent":
partitions_map = self.percentage_partition(dataset, percentage=partition_parameter)
else:
raise ValueError(f"Partition {partition} is not supported for Non-IID map")
if self.partition_id == 0:
self.plot_data_distribution(dataset, partitions_map)
self.plot_all_data_distribution(dataset, partitions_map)
return partitions_map[self.partition_id]
def generate_iid_map(self, dataset, partition="balancediid", partition_parameter=2):
if partition == "balancediid":
partitions_map = self.balanced_iid_partition(dataset)
elif partition == "unbalancediid":
partitions_map = self.unbalanced_iid_partition(dataset, imbalance_factor=partition_parameter)
else:
raise ValueError(f"Partition {partition} is not supported for IID map")
if self.partition_id == 0:
self.plot_data_distribution(dataset, partitions_map)
self.plot_all_data_distribution(dataset, partitions_map)
return partitions_map[self.partition_id]
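# Minimal sketch (not part of the original module) of the label-encoding step in
# SYSCALL.process(): every distinct "maltype" string is mapped to an integer target.
# The class names below are placeholders; the real ones come from the raw CSV files.
if __name__ == "__main__":
    maltypes = ["normal", "ransomware", "backdoor", "normal"]
    classes_to_targets = {c: t for t, c in enumerate(sorted(set(maltypes)))}
    print(classes_to_targets)  # {'backdoor': 0, 'normal': 1, 'ransomware': 2}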
| 8,053 | Python | .py | 178 | 34.938202 | 153 | 0.619339 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,994 | emnist.py | enriquetomasmb_nebula/nebula/core/datasets/emnist/emnist.py | import os
from nebula.core.datasets.nebuladataset import NebulaDataset
from torchvision import transforms
from torchvision.datasets import EMNIST
class EMNISTDataset(NebulaDataset):
def __init__(
self,
num_classes=10,
partition_id=0,
partitions_number=1,
batch_size=32,
num_workers=4,
iid=True,
partition="dirichlet",
partition_parameter=0.5,
seed=42,
config=None,
):
super().__init__(
num_classes=num_classes,
partition_id=partition_id,
partitions_number=partitions_number,
batch_size=batch_size,
num_workers=num_workers,
iid=iid,
partition=partition,
partition_parameter=partition_parameter,
seed=seed,
config=config,
)
def initialize_dataset(self):
if self.train_set is None:
self.train_set = self.load_emnist_dataset(train=True)
if self.test_set is None:
self.test_set = self.load_emnist_dataset(train=False)
# All nodes have the same test set (indices are the same for all nodes)
self.test_indices_map = list(range(len(self.test_set)))
# Depending on the iid flag, generate a non-iid or iid map of the train set
if self.iid:
self.train_indices_map = self.generate_iid_map(self.train_set, self.partition, self.partition_parameter)
self.local_test_indices_map = self.generate_iid_map(self.test_set, self.partition, self.partition_parameter)
else:
self.train_indices_map = self.generate_non_iid_map(self.train_set, self.partition, self.partition_parameter)
self.local_test_indices_map = self.generate_non_iid_map(self.test_set, self.partition, self.partition_parameter)
print(f"Length of train indices map: {len(self.train_indices_map)}")
print(f"Lenght of test indices map (global): {len(self.test_indices_map)}")
print(f"Length of test indices map (local): {len(self.local_test_indices_map)}")
def load_emnist_dataset(self, train=True):
mean = (0.5,) # Adjusted mean for 1 channel
std = (0.5,) # Adjusted std for 1 channel
apply_transforms = transforms.Compose(
[
transforms.RandomCrop(28, padding=4), # Crop size changed to 28
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean, std, inplace=True),
]
)
return EMNIST(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "data"),
train=train,
download=True,
transform=apply_transforms,
split="digits",
)
def generate_non_iid_map(self, dataset, partition="dirichlet", partition_parameter=0.5):
if partition == "dirichlet":
partitions_map = self.dirichlet_partition(dataset, alpha=partition_parameter)
elif partition == "percent":
partitions_map = self.percentage_partition(dataset, percentage=partition_parameter)
else:
raise ValueError(f"Partition {partition} is not supported for Non-IID map")
if self.partition_id == 0:
self.plot_data_distribution(dataset, partitions_map)
self.plot_all_data_distribution(dataset, partitions_map)
return partitions_map[self.partition_id]
def generate_iid_map(self, dataset, partition="balancediid", partition_parameter=2):
if partition == "balancediid":
partitions_map = self.balanced_iid_partition(dataset)
elif partition == "unbalancediid":
partitions_map = self.unbalanced_iid_partition(dataset, imbalance_factor=partition_parameter)
else:
raise ValueError(f"Partition {partition} is not supported for IID map")
if self.partition_id == 0:
self.plot_data_distribution(dataset, partitions_map)
self.plot_all_data_distribution(dataset, partitions_map)
return partitions_map[self.partition_id]
| 4,132 | Python | .py | 87 | 37.195402 | 124 | 0.640377 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,995 | militarysar.py | enriquetomasmb_nebula/nebula/core/datasets/militarysar/militarysar.py | import glob
import json
import os
import numpy as np
import logging
import torch
from nebula.core.datasets.nebuladataset import NebulaDataset
from torchvision import transforms
from torch.utils.data import Dataset
from nebula.config.config import TRAINING_LOGGER
logging_training = logging.getLogger(TRAINING_LOGGER)
class RandomCrop(object):
def __init__(self, size):
if isinstance(size, int):
self.size = (size, size)
else:
assert len(size) == 2
self.size = size
def __call__(self, sample):
_input = sample
if len(_input.shape) < 3:
_input = np.expand_dims(_input, axis=2)
h, w, _ = _input.shape
oh, ow = self.size
dh = h - oh
dw = w - ow
y = np.random.randint(0, dh) if dh > 0 else 0
x = np.random.randint(0, dw) if dw > 0 else 0
oh = oh if dh > 0 else h
ow = ow if dw > 0 else w
return _input[y : y + oh, x : x + ow, :]
class CenterCrop(object):
def __init__(self, size):
if isinstance(size, int):
self.size = (size, size)
else:
assert len(size) == 2
self.size = size
def __call__(self, sample):
_input = sample
if len(_input.shape) < 3:
_input = np.expand_dims(_input, axis=2)
h, w, _ = _input.shape
oh, ow = self.size
y = (h - oh) // 2
x = (w - ow) // 2
return _input[y : y + oh, x : x + ow, :]
class MilitarySAR(Dataset):
def __init__(self, name="soc", is_train=False, transform=None):
self.is_train = is_train
self.name = name
self.data = []
self.targets = []
self.serial_numbers = []
# Path to data is "data" folder in the same directory as this file
self.path_to_data = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
self.transform = transform
# self._load_data(self.path_to_data)
mode = "train" if self.is_train else "test"
self.image_list = glob.glob(os.path.join(self.path_to_data, f"{self.name}/{mode}/*/*.npy"))
self.label_list = glob.glob(os.path.join(self.path_to_data, f"{self.name}/{mode}/*/*.json"))
self.image_list = sorted(self.image_list, key=os.path.basename)
self.label_list = sorted(self.label_list, key=os.path.basename)
assert len(self.image_list) == len(self.label_list)
    def __len__(self):
        return len(self.image_list)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
_image = np.load(self.image_list[idx])
with open(self.label_list[idx], "r", encoding="utf-8") as f:
label_info = json.load(f)
_label = label_info["class_id"]
# serial_number = label_info['serial_number']
if self.transform:
_image = self.transform(_image)
return _image, _label
def _load_metadata(self):
self.targets = []
self.serial_numbers = []
for label_path in self.label_list:
with open(label_path, "r", encoding="utf-8") as f:
label_info = json.load(f)
self.targets.append(label_info["class_id"])
self.serial_numbers.append(label_info["serial_number"])
def get_targets(self):
if not self.targets:
logging_training.info(f"Loading Metadata for {self.__class__.__name__}")
self._load_metadata()
return self.targets
# def _load_data(self, path):
# logging_training.info(f'Loading {self.__class__.__name__} dataset: {self.name} | is_train: {self.is_train} | from {self.path_to_data}')
# mode = 'train' if self.is_train else 'test'
# image_list = glob.glob(os.path.join(self.path_to_data, f'{self.name}/{mode}/*/*.npy'))
# label_list = glob.glob(os.path.join(self.path_to_data, f'{self.name}/{mode}/*/*.json'))
# image_list = sorted(image_list, key=os.path.basename)
# label_list = sorted(label_list, key=os.path.basename)
# for image_path, label_path in zip(image_list, label_list):
# self.data.append(np.load(image_path))
# with open(label_path, mode='r', encoding='utf-8') as f:
# _label = json.load(f)
# self.targets.append(_label['class_id'])
# self.serial_number.append(_label['serial_number'])
# self.data = np.array(self.data)
# self.targets = np.array(self.targets)
class MilitarySARDataset(NebulaDataset):
def __init__(
self,
num_classes=10,
partition_id=0,
partitions_number=1,
batch_size=32,
num_workers=4,
iid=True,
partition="dirichlet",
partition_parameter=0.5,
seed=42,
config=None,
):
super().__init__(
num_classes=num_classes,
partition_id=partition_id,
partitions_number=partitions_number,
batch_size=batch_size,
num_workers=num_workers,
iid=iid,
partition=partition,
partition_parameter=partition_parameter,
seed=seed,
config=config,
)
def initialize_dataset(self):
if self.train_set is None:
self.train_set = self.load_militarysar_dataset(train=True)
if self.test_set is None:
self.test_set = self.load_militarysar_dataset(train=False)
train_targets = self.train_set.get_targets()
test_targets = self.test_set.get_targets()
self.test_indices_map = list(range(len(self.test_set)))
# Depending on the iid flag, generate a non-iid or iid map of the train set
if self.iid:
logging_training.info("Generating IID partition - Train")
self.train_indices_map = self.generate_iid_map(self.train_set, self.partition, self.partition_parameter)
logging_training.info("Generating IID partition - Test")
self.local_test_indices_map = self.generate_iid_map(self.test_set, self.partition, self.partition_parameter)
else:
logging_training.info("Generating Non-IID partition - Train")
self.train_indices_map = self.generate_non_iid_map(self.train_set, self.partition, self.partition_parameter)
logging_training.info("Generating Non-IID partition - Test")
self.local_test_indices_map = self.generate_non_iid_map(self.test_set, self.partition, self.partition_parameter)
print(f"Length of train indices map: {len(self.train_indices_map)}")
print(f"Lenght of test indices map (global): {len(self.test_indices_map)}")
print(f"Length of test indices map (local): {len(self.local_test_indices_map)}")
def load_militarysar_dataset(self, train=True):
apply_transforms = [CenterCrop(88), transforms.ToTensor()]
if train:
apply_transforms = [RandomCrop(88), transforms.ToTensor()]
return MilitarySAR(name="soc", is_train=train, transform=transforms.Compose(apply_transforms))
def generate_non_iid_map(self, dataset, partition="dirichlet", partition_parameter=0.5):
if partition == "dirichlet":
partitions_map = self.dirichlet_partition(dataset, alpha=partition_parameter)
elif partition == "percent":
partitions_map = self.percentage_partition(dataset, percentage=partition_parameter)
else:
raise ValueError(f"Partition {partition} is not supported for Non-IID map")
if self.partition_id == 0:
self.plot_data_distribution(dataset, partitions_map)
self.plot_all_data_distribution(dataset, partitions_map)
return partitions_map[self.partition_id]
def generate_iid_map(self, dataset, partition="balancediid", partition_parameter=2):
if partition == "balancediid":
partitions_map = self.balanced_iid_partition(dataset)
elif partition == "unbalancediid":
partitions_map = self.unbalanced_iid_partition(dataset, imbalance_factor=partition_parameter)
else:
raise ValueError(f"Partition {partition} is not supported for IID map")
if self.partition_id == 0:
self.plot_data_distribution(dataset, partitions_map)
self.plot_all_data_distribution(dataset, partitions_map)
return partitions_map[self.partition_id]
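# Minimal sketch (not part of the original module) of the crop transforms defined above:
# both RandomCrop and CenterCrop accept a 2-D or 3-D numpy array and always return a
# 3-D array with a trailing channel axis.
if __name__ == "__main__":
    chip = np.random.rand(128, 128)  # synthetic single-channel SAR chip
    print(CenterCrop(88)(chip).shape)  # (88, 88, 1)
    print(RandomCrop(88)(chip).shape)  # (88, 88, 1)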
| 8,464 | Python | .py | 180 | 37.911111 | 145 | 0.616302 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,996 | wadi.py | enriquetomasmb_nebula/nebula/core/datasets/wadi/wadi.py | import os
import sys
from torchvision.datasets import MNIST
from nebula.core.datasets.nebuladataset import NebulaDataset
import urllib.request
import numpy as np
import torch
class WADI(MNIST):
def __init__(self, partition_id, partitions_number, root_dir, train=True):
super(MNIST, self).__init__(root_dir, transform=None, target_transform=None)
self.partition_id = partition_id
self.partitions_number = partitions_number
self.download_link = "XXXX"
self.files = ["X_train.npy", "y_train.npy", "X_test.npy", "y_test.npy"]
self.train = train
self.root = root_dir
if not os.path.exists(f"{self.root}/WADI/X_train.npy") or not os.path.exists(f"{self.root}/WADI/y_train.npy") or not os.path.exists(f"{self.root}/WADI/X_test.npy") or not os.path.exists(f"{self.root}/WADI/y_test.npy"):
self.dataset_download()
        if self.train:
            self.data, self.targets = torch.from_numpy(np.load(f"{self.root}/WADI/X_train.npy")), torch.from_numpy(np.load(f"{self.root}/WADI/y_train.npy"))
        else:
            self.data, self.targets = torch.from_numpy(np.load(f"{self.root}/WADI/X_test.npy")), torch.from_numpy(np.load(f"{self.root}/WADI/y_test.npy"))
        self.data = self.data.to(torch.float32)
        self.targets = self.targets.to(torch.float32)
def __getitem__(self, index):
img, target = self.data[index], int(self.targets[index])
return img, target
def dataset_download(self):
paths = [f"{self.root}/WADI/"]
for path in paths:
if not os.path.exists(path):
os.makedirs(path)
for file in self.files:
urllib.request.urlretrieve(
os.path.join(f"{self.download_link}", file),
os.path.join(f"{self.root}/WADI/", file),
)
class WADIDataModule(NebulaDataset):
def __init__(
self,
num_classes=10,
partition_id=0,
partitions_number=1,
batch_size=32,
num_workers=4,
iid=True,
partition="dirichlet",
partition_parameter=0.5,
seed=42,
config=None,
):
super().__init__(
num_classes=num_classes,
partition_id=partition_id,
partitions_number=partitions_number,
batch_size=batch_size,
num_workers=num_workers,
iid=iid,
partition=partition,
partition_parameter=partition_parameter,
seed=seed,
config=config,
)
def initialize_dataset(self):
# Load wadi train dataset
if self.train_set is None:
self.train_set = self.load_wadi_dataset(train=True)
if self.test_set is None:
self.test_set = self.load_wadi_dataset(train=False)
# All nodes have the same test set (indices are the same for all nodes)
self.test_indices_map = list(range(len(self.test_set)))
# Depending on the iid flag, generate a non-iid or iid map of the train set
if self.iid:
self.train_indices_map = self.generate_iid_map(self.train_set, self.partition, self.partition_parameter)
self.local_test_indices_map = self.generate_iid_map(self.test_set, self.partition, self.partition_parameter)
else:
self.train_indices_map = self.generate_non_iid_map(self.train_set, self.partition, self.partition_parameter)
self.local_test_indices_map = self.generate_non_iid_map(self.test_set, self.partition, self.partition_parameter)
print(f"Length of train indices map: {len(self.train_indices_map)}")
print(f"Lenght of test indices map: {len(self.test_indices_map)}")
print(f"Lenght of test indices map (global): {len(self.test_indices_map)}")
print(f"Length of test indices map (local): {len(self.local_test_indices_map)}")
def load_wadi_dataset(self, train=True):
if train:
return WADI(
partition_id=self.partition_id,
partitions_number=self.partitions_number,
root_dir=f"{sys.path[0]}/data",
train=True,
)
else:
return WADI(
partition_id=self.partition_id,
partitions_number=self.partitions_number,
root_dir=f"{sys.path[0]}/data",
train=False,
)
def generate_non_iid_map(self, dataset, partition="dirichlet", partition_parameter=0.5):
if partition == "dirichlet":
partitions_map = self.dirichlet_partition(dataset, alpha=partition_parameter)
elif partition == "percent":
partitions_map = self.percentage_partition(dataset, percentage=partition_parameter)
else:
raise ValueError(f"Partition {partition} is not supported for Non-IID map")
if self.partition_id == 0:
self.plot_data_distribution(dataset, partitions_map)
self.plot_all_data_distribution(dataset, partitions_map)
return partitions_map[self.partition_id]
def generate_iid_map(self, dataset, partition="balancediid", partition_parameter=2):
if partition == "balancediid":
partitions_map = self.balanced_iid_partition(dataset)
elif partition == "unbalancediid":
partitions_map = self.unbalanced_iid_partition(dataset, imbalance_factor=partition_parameter)
else:
raise ValueError(f"Partition {partition} is not supported for IID map")
if self.partition_id == 0:
self.plot_data_distribution(dataset, partitions_map)
self.plot_all_data_distribution(dataset, partitions_map)
return partitions_map[self.partition_id]
| 5,969 | Python | .py | 123 | 38.073171 | 226 | 0.629063 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,997 | kitsun.py | enriquetomasmb_nebula/nebula/core/datasets/kitsun/kitsun.py | import os
import sys
from torchvision.datasets import MNIST, utils
from nebula.core.datasets.nebuladataset import NebulaDataset
import shutil
import zipfile
import torch
class KITSUN(MNIST):
def __init__(self, train=True):
self.root = f"{sys.path[0]}/data"
self.download = True
self.train = train
super(MNIST, self).__init__(self.root)
self.training_file = f"{self.root}/kitsun/processed/kitsun_train.pt"
self.test_file = f"{self.root}/kitsun/processed/kitsun_test.pt"
if not os.path.exists(f"{self.root}/kitsun/processed/kitsun_test.pt") or not os.path.exists(f"{self.root}/kitsun/processed/kitsun_train.pt"):
if self.download:
self.dataset_download()
else:
raise RuntimeError("Dataset not found, set parameter download=True to download")
if self.train:
data_file = self.training_file
else:
data_file = self.test_file
data_and_targets = torch.load(data_file)
self.data, self.targets = data_and_targets[0], data_and_targets[1]
self.data = self.data
def __getitem__(self, index):
img, target = self.data[index], int(self.targets[index])
if self.transform is not None:
img = img
if self.target_transform is not None:
target = target
return img, target
def dataset_download(self):
paths = [f"{self.root}/kitsun/raw/", f"{self.root}/kitsun/processed/"]
for path in paths:
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
data_link = "https://files.ifi.uzh.ch/CSG/research/fl/data/kitsun.zip"
filename = data_link.split("/")[-1]
utils.download_and_extract_archive(data_link, download_root=f"{self.root}/kitsun/raw/", filename=filename)
with zipfile.ZipFile(f"{self.root}/kitsun/raw/{filename}", "r") as zip_ref:
zip_ref.extractall(f"{self.root}/kitsun/raw/")
train_raw = f"{self.root}/kitsun/raw/kitsun_train.pt"
test_raw = f"{self.root}/kitsun/raw/kitsun_test.pt"
train_file = f"{self.root}/kitsun/processed/kitsun_train.pt"
test_file = f"{self.root}/kitsun/processed/kitsun_test.pt"
if not os.path.exists(train_file):
shutil.copy(train_raw, train_file)
if not os.path.exists(test_file):
shutil.copy(test_raw, test_file)
class KITSUNDataset(NebulaDataset):
def __init__(
self,
num_classes=10,
partition_id=0,
partitions_number=1,
batch_size=32,
num_workers=4,
iid=True,
partition="dirichlet",
partition_parameter=0.5,
seed=42,
config=None,
):
super().__init__(
num_classes=num_classes,
partition_id=partition_id,
partitions_number=partitions_number,
batch_size=batch_size,
num_workers=num_workers,
iid=iid,
partition=partition,
partition_parameter=partition_parameter,
seed=seed,
config=config,
)
def initialize_dataset(self):
# Load CIFAR10 train dataset
if self.train_set is None:
self.train_set = self.load_kitsun_dataset(train=True)
if self.test_set is None:
self.test_set = self.load_kitsun_dataset(train=False)
# All nodes have the same test set (indices are the same for all nodes)
self.test_indices_map = list(range(len(self.test_set)))
# Depending on the iid flag, generate a non-iid or iid map of the train set
if self.iid:
self.train_indices_map = self.generate_iid_map(self.train_set, self.partition, self.partition_parameter)
self.local_test_indices_map = self.generate_iid_map(self.test_set, self.partition, self.partition_parameter)
else:
self.train_indices_map = self.generate_non_iid_map(self.train_set, self.partition, self.partition_parameter)
self.local_test_indices_map = self.generate_non_iid_map(self.test_set, self.partition, self.partition_parameter)
print(f"Length of train indices map: {len(self.train_indices_map)}")
print(f"Lenght of test indices map (global): {len(self.test_indices_map)}")
print(f"Length of test indices map (local): {len(self.local_test_indices_map)}")
def load_kitsun_dataset(self, train=True):
if train:
return KITSUN(train=True)
return KITSUN(train=False)
def generate_non_iid_map(self, dataset, partition="dirichlet", partition_parameter=0.5):
if partition == "dirichlet":
partitions_map = self.dirichlet_partition(dataset, alpha=partition_parameter)
elif partition == "percent":
partitions_map = self.percentage_partition(dataset, percentage=partition_parameter)
else:
raise ValueError(f"Partition {partition} is not supported for Non-IID map")
if self.partition_id == 0:
self.plot_data_distribution(dataset, partitions_map)
self.plot_all_data_distribution(dataset, partitions_map)
return partitions_map[self.partition_id]
def generate_iid_map(self, dataset, partition="balancediid", partition_parameter=2):
if partition == "balancediid":
partitions_map = self.balanced_iid_partition(dataset)
elif partition == "unbalancediid":
partitions_map = self.unbalanced_iid_partition(dataset, imbalance_factor=partition_parameter)
else:
raise ValueError(f"Partition {partition} is not supported for IID map")
if self.partition_id == 0:
self.plot_data_distribution(dataset, partitions_map)
self.plot_all_data_distribution(dataset, partitions_map)
return partitions_map[self.partition_id]
| 5,903 | Python | .py | 122 | 38.680328 | 149 | 0.643688 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,998 | cifar10.py | enriquetomasmb_nebula/nebula/core/datasets/cifar10/cifar10.py | from nebula.core.datasets.nebuladataset import NebulaDataset
from torchvision import transforms
from torchvision.datasets import CIFAR10
import os
class CIFAR10Dataset(NebulaDataset):
def __init__(
self,
num_classes=10,
partition_id=0,
partitions_number=1,
batch_size=32,
num_workers=4,
iid=True,
partition="dirichlet",
partition_parameter=0.5,
seed=42,
config=None,
):
super().__init__(
num_classes=num_classes,
partition_id=partition_id,
partitions_number=partitions_number,
batch_size=batch_size,
num_workers=num_workers,
iid=iid,
partition=partition,
partition_parameter=partition_parameter,
seed=seed,
config=config,
)
def initialize_dataset(self):
# Load CIFAR10 train dataset
if self.train_set is None:
self.train_set = self.load_cifar10_dataset(train=True)
if self.test_set is None:
self.test_set = self.load_cifar10_dataset(train=False)
# All nodes have the same test set (indices are the same for all nodes)
self.test_indices_map = list(range(len(self.test_set)))
# Depending on the iid flag, generate a non-iid or iid map of the train set
if self.iid:
self.train_indices_map = self.generate_iid_map(self.train_set, self.partition, self.partition_parameter)
self.local_test_indices_map = self.generate_iid_map(self.test_set, self.partition, self.partition_parameter)
else:
self.train_indices_map = self.generate_non_iid_map(self.train_set, self.partition, self.partition_parameter)
self.local_test_indices_map = self.generate_non_iid_map(self.test_set, self.partition, self.partition_parameter)
print(f"Length of train indices map: {len(self.train_indices_map)}")
print(f"Lenght of test indices map (global): {len(self.test_indices_map)}")
print(f"Length of test indices map (local): {len(self.local_test_indices_map)}")
def load_cifar10_dataset(self, train=True):
mean = (0.4914, 0.4822, 0.4465)
std = (0.2471, 0.2435, 0.2616)
apply_transforms = transforms.Compose(
[
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean, std, inplace=True),
]
)
data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
os.makedirs(data_dir, exist_ok=True)
return CIFAR10(
data_dir,
train=train,
download=True,
transform=apply_transforms,
)
def generate_non_iid_map(self, dataset, partition="dirichlet", partition_parameter=0.5):
if partition == "dirichlet":
partitions_map = self.dirichlet_partition(dataset, alpha=partition_parameter)
elif partition == "percent":
partitions_map = self.percentage_partition(dataset, percentage=partition_parameter)
else:
raise ValueError(f"Partition {partition} is not supported for Non-IID map")
if self.partition_id == 0:
self.plot_data_distribution(dataset, partitions_map)
self.plot_all_data_distribution(dataset, partitions_map)
return partitions_map[self.partition_id]
def generate_iid_map(self, dataset, partition="balancediid", partition_parameter=2):
if partition == "balancediid":
partitions_map = self.balanced_iid_partition(dataset)
elif partition == "unbalancediid":
partitions_map = self.unbalanced_iid_partition(dataset, imbalance_factor=partition_parameter)
else:
raise ValueError(f"Partition {partition} is not supported for IID map")
if self.partition_id == 0:
self.plot_data_distribution(dataset, partitions_map)
self.plot_all_data_distribution(dataset, partitions_map)
return partitions_map[self.partition_id]
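# Minimal sketch (not part of the original module) of the training-time transform pipeline
# built in load_cifar10_dataset: it augments and normalises a 32x32 RGB image into a
# (3, 32, 32) float tensor. The random image below is a placeholder for a CIFAR-10 sample.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    mean = (0.4914, 0.4822, 0.4465)
    std = (0.2471, 0.2435, 0.2616)
    tfm = transforms.Compose(
        [
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ]
    )
    img = Image.fromarray(np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8))
    print(tfm(img).shape)  # torch.Size([3, 32, 32])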
| 4,180 | Python | .py | 89 | 36.606742 | 124 | 0.640433 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,999 | fashionmnist.py | enriquetomasmb_nebula/nebula/core/datasets/fashionmnist/fashionmnist.py | import os
from nebula.core.datasets.nebuladataset import NebulaDataset
from torchvision import transforms
from torchvision.datasets import FashionMNIST
class FashionMNISTDataset(NebulaDataset):
def __init__(
self,
num_classes=10,
partition_id=0,
partitions_number=1,
batch_size=32,
num_workers=4,
iid=True,
partition="dirichlet",
partition_parameter=0.5,
seed=42,
config=None,
):
super().__init__(
num_classes=num_classes,
partition_id=partition_id,
partitions_number=partitions_number,
batch_size=batch_size,
num_workers=num_workers,
iid=iid,
partition=partition,
partition_parameter=partition_parameter,
seed=seed,
config=config,
)
if partition_id < 0 or partition_id >= partitions_number:
raise ValueError(f"partition_id {partition_id} is out of range for partitions_number {partitions_number}")
def initialize_dataset(self):
if self.train_set is None:
self.train_set = self.load_fmnist_dataset(train=True)
if self.test_set is None:
self.test_set = self.load_fmnist_dataset(train=False)
self.test_indices_map = list(range(len(self.test_set)))
if self.iid:
self.train_indices_map = self.generate_iid_map(self.train_set, self.partition, self.partition_parameter)
self.local_test_indices_map = self.generate_iid_map(self.test_set, self.partition, self.partition_parameter)
else:
self.train_indices_map = self.generate_non_iid_map(self.train_set, self.partition, self.partition_parameter)
self.local_test_indices_map = self.generate_non_iid_map(self.test_set, self.partition, self.partition_parameter)
print(f"Length of train indices map: {len(self.train_indices_map)}")
print(f"Lenght of test indices map (global): {len(self.test_indices_map)}")
print(f"Length of test indices map (local): {len(self.local_test_indices_map)}")
def load_fmnist_dataset(self, train=True):
apply_transforms = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,), inplace=True)])
data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
os.makedirs(data_dir, exist_ok=True)
return FashionMNIST(
data_dir,
train=train,
download=True,
transform=apply_transforms,
)
def generate_non_iid_map(self, dataset, partition="dirichlet", partition_parameter=0.5):
if partition == "dirichlet":
partitions_map = self.dirichlet_partition(dataset, alpha=partition_parameter)
elif partition == "percent":
partitions_map = self.percentage_partition(dataset, percentage=partition_parameter)
else:
raise ValueError(f"Partition {partition} is not supported for Non-IID map")
if self.partition_id == 0:
self.plot_data_distribution(dataset, partitions_map)
self.plot_all_data_distribution(dataset, partitions_map)
return partitions_map[self.partition_id]
def generate_iid_map(self, dataset, partition="balancediid", partition_parameter=2):
if partition == "balancediid":
partitions_map = self.balanced_iid_partition(dataset)
elif partition == "unbalancediid":
partitions_map = self.unbalanced_iid_partition(dataset, imbalance_factor=partition_parameter)
else:
raise ValueError(f"Partition {partition} is not supported for IID map")
if self.partition_id == 0:
self.plot_data_distribution(dataset, partitions_map)
self.plot_all_data_distribution(dataset, partitions_map)
return partitions_map[self.partition_id]
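# End-to-end sketch (not part of the original module), wiring the partitioner into the
# Lightning DataModule from nebula.core.datasets.datamodule. It assumes the base class can
# be constructed without a full NEBULA config and that initialize_dataset() is invoked
# explicitly; partition_id=1 avoids the distribution plots, which need config paths.
if __name__ == "__main__":
    from nebula.core.datasets.datamodule import DataModule
    fmnist = FashionMNISTDataset(partition_id=1, partitions_number=2, iid=True, partition="balancediid")
    fmnist.initialize_dataset()
    dm = DataModule(
        train_set=fmnist.train_set,
        train_set_indices=fmnist.train_indices_map,
        test_set=fmnist.test_set,
        test_set_indices=fmnist.test_indices_map,
        local_test_set_indices=fmnist.local_test_indices_map,
        partition_id=1,
        partitions_number=2,
    )
    dm.setup("fit")
    print("train batches:", len(dm.train_dataloader()))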
| 3,926 | Python | .py | 79 | 39.64557 | 124 | 0.656381 | enriquetomasmb/nebula | 8 | 2 | 2 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |