# File: watermelon1024_EEW/dev.py
import asyncio
import logging
import os
import random
import time
from aiohttp import web
DEV_SERVER_HOST = "127.0.0.1"
DEV_SERVER_PORT = 8000
loop = asyncio.new_event_loop()
app = web.Application()
routes = web.RouteTableDef()
from src import Client, Config, InterceptHandler, Logging, WebSocketConnectionConfig, WebSocketService
config = Config()
logger = Logging(
retention=config["log"]["retention"],
debug_mode=config["debug-mode"],
format=config["log"]["format"],
).get_logger()
logging.basicConfig(
handlers=[InterceptHandler(logger)],
level=0 if config["debug-mode"] else logging.INFO,
force=True,
)
key = os.getenv("API_KEY")
if key:
logger.info("API_KEY found, using WebSocket Client")
ws_config = WebSocketConnectionConfig(key=key, service=[WebSocketService.EEW, WebSocketService.TREM_EEW])
else:
logger.info("API_KEY not found, using HTTP Client")
ws_config = None
client = Client(
config=config, logger=logger, websocket_config=ws_config, debug=config["debug-mode"], loop=loop
)
client._http.DOMAIN = f"{DEV_SERVER_HOST}:{DEV_SERVER_PORT}"
client._http.API_NODES = [f"http://{DEV_SERVER_HOST}:{DEV_SERVER_PORT}/api/v1"]
client._http.WS_NODES = [f"ws://{DEV_SERVER_HOST}:{DEV_SERVER_PORT}/websocket"]
async def start_client():
await asyncio.sleep(5)
client.load_notification_clients("notification")
await client.start()
async def on_startup(_):
global task
task = loop.create_task(start_client())
async def on_shutdown(_):
task.cancel()
await task
app.on_startup.append(on_startup)
app.on_shutdown.append(on_shutdown)
# web api
content = []
eq_id = 1130699
async def update_earthquake_data():
global eq_id
await asyncio.sleep(10)
eq_id += 1
earthquake_data = {
"id": f"{eq_id}",
"author": "測試資料",
"serial": 1,
"final": 0,
"eq": {
"lat": 24.23,
"lon": 122.16,
"depth": 40,
"loc": "花蓮縣外海",
"mag": 6.9,
"time": int(time.time() - 12.5) * 1000, # 使用當前時間
"max": 5,
},
"time": int(time.time() * 1000), # 使用當前時間
}
content.append(earthquake_data)
while True:
await asyncio.sleep(random.uniform(0.5, 3))
earthquake_data["serial"] += 1
earthquake_data["eq"]["mag"] += random.uniform(-0.05, 0.1) # 模擬震級變化
earthquake_data["eq"]["mag"] = round(earthquake_data["eq"]["mag"], 1)
earthquake_data["eq"]["depth"] += random.randint(-1, 3) * 5 # 模擬深度變化
earthquake_data["eq"]["lat"] += random.uniform(-0.2, 0.1) # 模擬經緯度變化
earthquake_data["eq"]["lon"] += random.uniform(-0.2, 0.1)
earthquake_data["eq"]["lat"] = round(earthquake_data["eq"]["lat"], 2)
earthquake_data["eq"]["lon"] = round(earthquake_data["eq"]["lon"], 2)
current_time = int(time.time() * 1000)
earthquake_data["time"] = current_time # 更新發報時間
if earthquake_data["serial"] >= 5:
earthquake_data["final"] = 1 # 假設 5 次更新後即為最終報告
break
await asyncio.sleep(20)
content.pop(0)
@routes.get("/api/v1/eq/eew")
async def get_earthquake(request):
return web.json_response(content)
@routes.get("/post")
async def post_earthquake(request):
asyncio.create_task(update_earthquake_data())
return web.Response(text="Started earthquake data update task")
app.add_routes(routes)
if __name__ == "__main__":
web.run_app(app, host=DEV_SERVER_HOST, port=DEV_SERVER_PORT, loop=loop)
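# --- Example (a sketch, not part of the repo): exercising the dev server above.
# Assumes the server is already running on 127.0.0.1:8000; the endpoint paths
# come from the routes defined in dev.py.
import asyncio

import aiohttp


async def demo():
    async with aiohttp.ClientSession() as session:
        # kick off the simulated earthquake update task
        async with session.get("http://127.0.0.1:8000/post") as resp:
            print(await resp.text())
        # wait for the first report to appear (dev.py delays it by 10 seconds)
        await asyncio.sleep(11)
        # poll the same endpoint the EEW client consumes
        async with session.get("http://127.0.0.1:8000/api/v1/eq/eew") as resp:
            print(await resp.json())


asyncio.run(demo())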
# File: watermelon1024_EEW/main.py
import logging
import os
from dotenv import load_dotenv
load_dotenv(override=True)
def main():
from src import Client, Config, InterceptHandler, Logging, WebSocketConnectionConfig, WebSocketService
config = Config()
logger = Logging(
retention=config["log"]["retention"],
debug_mode=config["debug-mode"],
format=config["log"]["format"],
).get_logger()
logging.basicConfig(
handlers=[InterceptHandler(logger)],
level=0 if config["debug-mode"] else logging.INFO,
force=True,
)
key = os.getenv("API_KEY")
if key:
logger.info("API_KEY found, using WebSocket Client")
ws_config = WebSocketConnectionConfig(
key=key, service=[WebSocketService.EEW, WebSocketService.TREM_EEW]
)
else:
logger.info("API_KEY not found, using HTTP Client")
ws_config = None
client = Client(config=config, logger=logger, websocket_config=ws_config, debug=config["debug-mode"])
client.load_notification_clients("notification")
client.run()
logger.remove()
if __name__ == "__main__":
main()
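# --- Example (a sketch, not part of the repo): the environment variables read
# across this codebase, set programmatically here for illustration; in practice
# they belong in a .env file picked up by load_dotenv() above. All values are
# placeholders.
import os

os.environ.setdefault("API_KEY", "<exptech-api-key>")  # optional: enables the WebSocket client
os.environ.setdefault("DISCORD_BOT_TOKEN", "<discord-bot-token>")  # notification/discord
os.environ.setdefault("LINEBOT_ACCESS_TOKEN", "<line-access-token>")  # notification/line.py
os.environ.setdefault("LINEBOT_CHANNEL_SECRET", "<line-channel-secret>")  # notification/line.py
os.environ.setdefault("LINENOTIFY_TOKEN", "<line-notify-token>")  # notification/linenotify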
# File: watermelon1024_EEW/notification/line.py
import asyncio
import os
import aiohttp
from src import EEW, BaseNotificationClient, Config, Logger
LINE_API_NODE = "https://api.line.me/v2"
class LineNotification(BaseNotificationClient):
"""
Represents a linebot EEW notification client.
"""
alerts: dict[str, str] = {}
notification_channels: list[str] = []
def __init__(self, logger: Logger, config: Config, access_token: str, channel_secret: str) -> None:
"""
Initialize a new linebot notification client.
:param logger: The logger instance.
:type logger: Logger
:param config: The configuration.
:type config: Config
:param access_token: The LINE Messaging API access token.
:type access_token: str
:param channel_secret: The LINE Messaging API channel secret.
:type channel_secret: str
"""
self.logger = logger
self.config = config
self.__access_token = access_token
self.__channel_secret = channel_secret
for channel_id in self.config["channels"]:
# TODO: check channel status
self.notification_channels.append(channel_id)
def _flex_message(self, eew: EEW):
eq = eew.earthquake
time_str = eq.time.strftime("%H:%M:%S")
summary = f"地震速報:{time_str}於{eq.location.display_name or eq.location}發生規模 M{eq.mag} 地震"
image = f"https://static-maps.yandex.ru/1.x/?ll={eq.lon},{eq.lat}&z=10&l=map&size=650,450&pt={eq.lon},{eq.lat},round"
provider = f"{eew.provider.display_name} ({eew.provider.name})"
serial = f"編號:{eew.id} (第{eew.serial}報)"
time = f"發生時間:{time_str}"
location = f"震央:{eq.location.display_name or eq.location}"
magnitude = f"規模:M{eq.mag}"
depth = f"深度:{eq.depth}公里"
return [
{
"type": "flex",
"altText": summary,
"contents": {
"type": "bubble",
"header": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "地震速報",
"weight": "bold",
"size": "md",
"align": "start",
"gravity": "center",
},
{
"type": "text",
"text": provider,
"size": "sm",
"color": "#0000FFFF",
"align": "end",
"gravity": "center",
},
],
},
{"type": "text", "text": serial},
],
},
"hero": {
"type": "image",
"url": image,
"align": "center",
"gravity": "center",
"size": "xxl",
"aspectRatio": "13:9",
},
"body": {
"type": "box",
"layout": "vertical",
"spacing": "md",
"contents": [
{
"type": "text",
"text": "慎防強烈搖晃,就近避難\n[趴下、掩護、穩住]",
"weight": "bold",
"size": "lg",
"align": "center",
"gravity": "top",
"wrap": True,
},
{"type": "separator"},
{
"type": "box",
"layout": "vertical",
"contents": [
{"type": "text", "text": time, "margin": "md", "wrap": True},
{"type": "separator", "margin": "md"},
{"type": "text", "text": location, "margin": "md", "wrap": True},
{"type": "separator", "margin": "md"},
{"type": "text", "text": magnitude, "margin": "md", "wrap": True},
{"type": "separator", "margin": "md"},
{"type": "text", "text": depth, "margin": "md", "wrap": True},
{"type": "separator", "margin": "md"},
],
},
],
},
"footer": {
"type": "box",
"layout": "horizontal",
"spacing": "md",
"contents": [
{
"type": "button",
"action": {
"type": "uri",
"label": "地震報告",
"uri": "https://www.cwa.gov.tw/V8/C/E/index.html",
},
}
],
},
},
}
]
async def _send_message(self, session: aiohttp.ClientSession, channel_id: str, message: dict) -> None:
try:
async with session.post(
f"{LINE_API_NODE}/bot/message/push", json={"to": channel_id, "messages": message}
) as response:
if not response.ok:
raise aiohttp.ClientResponseError(
response.request_info, status=response.status, message=await response.text()
)
except Exception as e:
self.logger.exception(f"Failed to send message alert to {channel_id}", exc_info=e)
async def send_eew(self, eew: EEW) -> None:
"""
        If a new EEW is detected, this method will be called.
Note: This method should not do any blocking calls.
:param eew: The EEW.
:type eew: EEW
"""
if not self.notification_channels:
self.logger.error("No LINE notification channels available")
return
headers = {"Content-Type": "application/json", "Authorization": f"Bearer {self.__access_token}"}
msg = self._flex_message(eew)
async with aiohttp.ClientSession(headers=headers) as session:
await asyncio.gather(
*(self._send_message(session, channel_id, msg) for channel_id in self.notification_channels)
)
async def update_eew(self, eew: EEW):
"""
If an EEW is updated, this method will be called.
Note: This method should not do any blocking calls.
:param eew: The updated EEW.
:type eew: EEW
"""
if not self.notification_channels:
self.logger.error("No LINE notification channels available")
return
headers = {"Content-Type": "application/json", "Authorization": f"Bearer {self.__access_token}"}
msg = self._flex_message(eew)
async with aiohttp.ClientSession(headers=headers) as session:
await asyncio.gather(
*(self._send_message(session, channel_id, msg) for channel_id in self.notification_channels)
)
async def lift_eew(self, eew: EEW):
"""
If an EEW alert was lifted, this method will be called.
Note: This method should not do any blocking calls.
:param eew: The lifted EEW.
:type eew: EEW
"""
pass
async def start(self) -> None:
"""
The entrypoint for the notification client.
"""
self.logger.info("LINE Bot is ready")
NAMESPACE = "line-bot"
def register(config: Config, logger: Logger) -> None:
"""
Register the linebot notification client.
Note: DO NOT run or start the client in this function, just register it.
If you want to run it in the event loop, do it in :method:`NotificationClient.run`.
:param config: The configuration of linebot notification client.
:type config: Config
:param logger: The logger instance.
:type logger: Logger
"""
access_token = os.environ.get("LINEBOT_ACCESS_TOKEN")
channel_secret = os.environ.get("LINEBOT_CHANNEL_SECRET")
if access_token is None or channel_secret is None:
logger.error(f"{NAMESPACE} LINEBOT_ACCESS_TOKEN or LINEBOT_CHANNEL_SECRET is not set")
return
return LineNotification(logger, config, access_token, channel_secret)
# File: watermelon1024_EEW/notification/discord/message.py
import asyncio
import math
from datetime import datetime
from typing import TYPE_CHECKING, Optional, TypedDict
import discord
from src import EEW
if TYPE_CHECKING:
from .bot import DiscordNotification
class NotificationChannel(TypedDict):
channel: discord.TextChannel
mention: Optional[str]
class _SingleMessage:
"""
Represents a single message.
"""
__slots__ = ("message", "mention", "edit")
def __init__(self, message: discord.Message, mention: Optional[str]) -> None:
self.message = message
self.mention = mention
self.edit = self.message.edit
class EEWMessages:
"""
Represents discord messages with EEW data.
"""
__slots__ = (
"bot",
"eew",
"messages",
"__ready",
"_info_embed",
"_intensity_embed",
"_region_intensity",
"map_url",
"_bot_latency",
"_lift_time",
"_last_update",
"_map_update_interval",
)
def __init__(self, bot: "DiscordNotification", eew: EEW, messages: list[_SingleMessage]) -> None:
"""
Initialize a new discord message.
:param bot: The discord bot.
:type bot: DiscordNotification
:param eew: The EEW instance.
:type eew: EEW
:param messages: The discord message.
:type messages: list[_SingleMessage]
"""
self.bot = bot
self.eew = eew
self.messages = messages
self.__ready = asyncio.Event()
self._info_embed: Optional[discord.Embed] = None
self._intensity_embed = None
self._region_intensity: Optional[dict[tuple[str, str], tuple[str, int]]] = None
self.map_url: Optional[str] = None
self._bot_latency: float = 0
self._lift_time = eew.earthquake.time.timestamp() + 120 # 2min
self._last_update: float = 0
self._map_update_interval: float = 1
def info_embed(self) -> discord.Embed:
# shortcut
eew = self.eew
eq = eew._earthquake
self._info_embed = discord.Embed(
title=f"地震速報 第 {eew.serial} 報{'(最終報)' if eew.final else ''}",
description=f"""\
<t:{int(eq.time.timestamp())}:T> 於 {eq.location.display_name or ""}(`{eq.lon:.2f}`, `{eq.lat:.2f}`) 發生有感地震,慎防搖晃!
預估規模 `{eq.mag}`,震源深度 `{eq.depth}` 公里,最大震度{eq.max_intensity.display}
發報單位.{eew.provider.display_name}|發報時間.<t:{int(eew.time.timestamp())}:T>""",
color=0xFF0000,
).set_author(
name="Taiwan Earthquake Early Warning",
icon_url="https://raw.githubusercontent.com/watermelon1024/EEW/main/asset/logo_small.png",
)
return self._info_embed
def get_latency(self) -> float:
"""
Get the bot latency.
"""
if math.isfinite(ping := self.bot.latency):
self._bot_latency = ping
return self._bot_latency
def intensity_embed(self) -> discord.Embed:
if self.eew.earthquake.city_max_intensity is None:
return self._intensity_embed
if self._region_intensity is None:
self.get_region_intensity()
current_time = int(datetime.now().timestamp() + self.get_latency())
self._intensity_embed = discord.Embed(
title="震度等級預估",
description="各縣市預估最大震度|預計抵達時間\n"
+ "\n".join(
f"{city} {town} {intensity}|{f'<t:{time}:R>抵達' if time > current_time else '⚠️已抵達'}"
for (city, town), (intensity, time) in self._region_intensity.items()
)
+ f"\n上次更新:<t:{current_time}:T> (<t:{current_time}:R>)",
color=0xF39C12,
image="attachment://image.png",
).set_footer(text="僅供參考,實際情況以氣象署公布之資料為準")
return self._intensity_embed
def get_region_intensity(self):
self._region_intensity = {
(city, intensity.region.name.ljust(4, " ")): (
intensity.intensity.display,
int(intensity.distance.s_arrival_time.timestamp()),
)
for city, intensity in self.eew.earthquake.city_max_intensity.items()
if intensity.intensity.value > 0
}
# self._lift_time = max(x[1] for x in self._region_intensity.values()) + 10
return self._region_intensity
async def _send_first_message(self):
"Fisrt time send message(s) in discord"
eq = self.eew.earthquake
msg = f"{eq.time.strftime('%H:%M:%S')} 於 {eq.location.display_name or eq.location} 發生規模 {eq.mag} 有感地震,慎防搖晃!"
self.messages = list(
filter(
None,
await asyncio.gather(
*(
self._send_single_message(channel["channel"], msg, channel["mention"])
for channel in self.bot.notification_channels
)
),
)
)
self.__ready.set()
async def _send_single_message(
self, channel: discord.TextChannel, content: str, mention: Optional[str] = None
):
try:
return _SingleMessage(await channel.send(f"{content} {mention or ''}"), mention)
except Exception as e:
self.bot.logger.exception(f"Failed to send message in {channel.name}", exc_info=e)
return None
async def _edit_single_message(self, message: _SingleMessage, intensity_embed: discord.Embed, **kwargs):
try:
return await message.edit(content=message.mention, embeds=[self._info_embed, intensity_embed], **kwargs) # type: ignore
except Exception as e:
self.bot.logger.exception(f"Failed to edit message {message.message.id}", exc_info=e)
return None
@classmethod
async def send(
cls,
bot: "DiscordNotification",
eew: EEW,
) -> Optional["EEWMessages"]:
"""
Send new discord messages.
:param bot: The discord bot.
:type bot: DiscordNotification
:param eew: The EEW instance.
:type eew: EEW
:return: The new discord messages.
        :rtype: EEWMessages
"""
self = cls(bot, eew, [])
bot.loop.create_task(self._send_first_message())
self._info_embed = self.info_embed()
self._intensity_embed = discord.Embed(title="震度等級預估", description="計算中...")
return self
async def edit(self) -> None:
"""
Edit the discord messages to update S wave arrival time.
"""
intensity_embed = self.intensity_embed()
current_time = datetime.now().timestamp()
await self.__ready.wait() # wait for all messages sent successfully
if not self.map_url or current_time - self._last_update >= self._map_update_interval:
eq = self.eew.earthquake
if not eq.map._drawn:
intensity_embed.remove_image()
file = {}
else:
eq.map.draw_wave(current_time - eq.time.timestamp() + self.get_latency())
file = {"file": discord.File(eq.map.save(), "image.png")}
self._last_update = datetime.now().timestamp()
self._map_update_interval = max(self._last_update - current_time, self._map_update_interval)
m = await self._edit_single_message(self.messages[0], intensity_embed, **file)
            if m and len(m.embeds) > 1 and (image := m.embeds[1].image):
self.map_url = image.url
elif self.eew.earthquake.map.image is not None:
# if intensity calc has done but map not drawn
self.bot.logger.warning("Failed to get image url.")
update = ()
intensity_embed = self.intensity_embed()
else:
update = (self._edit_single_message(self.messages[0], intensity_embed.copy()),)
intensity_embed.set_image(url=self.map_url)
await asyncio.gather(
*update,
*(self._edit_single_message(msg, intensity_embed) for msg in self.messages[1:]),
return_exceptions=True,
)
async def update_eew_data(self, eew: EEW) -> "EEWMessages":
"""
Update EEW data.
:param eew: The EEW instance.
:type eew: EEW
"""
self.eew = eew
self.map_url = None
self.info_embed()
return self
async def lift_eew(self):
"""
Lift the EEW alert.
"""
self._info_embed.title = f"地震速報(共 {self.eew.serial} 報)播報結束"
original_intensity_embed = self._intensity_embed.copy().set_image(url="attachment://image.png")
await asyncio.gather(
self._edit_single_message(self.messages[0], original_intensity_embed),
*(self._edit_single_message(msg, self._intensity_embed) for msg in self.messages[1:]),
return_exceptions=True,
)
# File: watermelon1024_EEW/notification/discord/bot.py
from datetime import datetime
import discord
from discord.ext import tasks
from src import EEW, BaseNotificationClient, Config, Logger
from .message import EEWMessages, NotificationChannel
def void(*args, **kwargs):
pass
class DiscordNotification(BaseNotificationClient, discord.Bot):
"""
Represents a discord notification client.
"""
def __init__(self, logger: Logger, config: Config, token: str) -> None:
"""
Initialize a new discord notification client.
:param logger: The logger instance.
:type logger: Logger
:param config: The configuration.
:type config: Config
:param token: The discord bot token.
:type token: str
"""
self.logger = logger
self.config = config
self.token = token
if not config.get("enable-log"):
logger.disable("discord") # avoid pycord shard info spamming the console
self._client_ready = False
intents = discord.Intents.default()
owner_ids = config.get("owners")
discord.Bot.__init__(self, owner_ids=owner_ids, intents=intents)
# eew-id: EEWMessages
self.alerts: dict[str, EEWMessages] = {}
self.notification_channels: list[NotificationChannel] = []
async def get_or_fetch_channel(self, id: int):
return self.get_channel(id) or await self.fetch_channel(id)
async def on_ready(self) -> None:
"""
The event that is triggered when the bot is ready.
"""
if self._client_ready:
return
for data in self.config["channels"]:
id = data.get("id")
try:
channel = await self.get_or_fetch_channel(id)
except discord.NotFound:
self.logger.warning(f"Ignoring channel '{id}': Not found")
continue
except discord.Forbidden:
self.logger.warning(f"Ignoring channel '{id}': No permission to see this channel")
continue
if not channel.can_send(discord.Message, discord.Embed, discord.File):
self.logger.warning(f"Ignoring channel '{id}': No permission to send message")
continue
mention = (
None
if not (m := data.get("mention"))
else (f"<@&{m}>" if isinstance(m, int) else f"@{m.removeprefix('@')}")
)
self.notification_channels.append({"channel": channel, "mention": mention})
if not self.notification_channels:
self.logger.warning("No Discord notification channel available.")
self.send_eew = void
self.update_eew = void
self.lift_eew = void
self.logger.info(
"Discord Bot is ready.\n"
"-------------------------\n"
f"Logged in as: {self.user.name}#{self.user.discriminator} ({self.user.id})\n" # type: ignore
f" API Latency: {self.latency * 1000:.2f} ms\n"
f"Guilds Count: {len(self.guilds)}\n"
"-------------------------"
)
self._client_ready = True
async def start(self) -> None:
self.logger.info("Starting Discord Bot.")
await discord.Bot.start(self, self.token, reconnect=True)
async def close(self) -> None:
await discord.Bot.close(self)
self.logger.info("Discord Bot closed.")
async def send_eew(self, eew: EEW):
m = await EEWMessages.send(self, eew)
if m is None:
self.logger.warning("Failed to send EEW message(s).")
return
self.alerts[eew.id] = m
if not self.update_eew_messages_loop.is_running():
self.update_eew_messages_loop.start()
async def update_eew(self, eew: EEW):
m = self.alerts.get(eew.id)
if m is None:
await self.send_eew(eew)
return
await m.update_eew_data(eew)
async def lift_eew(self, eew: EEW):
m = self.alerts.pop(eew.id, None)
if m is not None:
await m.lift_eew()
@tasks.loop(seconds=1)
async def update_eew_messages_loop(self):
if not self.alerts:
self.update_eew_messages_loop.stop()
return
now_time = int(datetime.now().timestamp())
for m in list(self.alerts.values()):
if now_time > m._lift_time:
self.loop.create_task(self.lift_eew(m.eew))
else:
self.loop.create_task(m.edit())
# File: watermelon1024_EEW/notification/discord/register.py
import os
from src import Config, Logger
NAMESPACE = "discord-bot"
def register(config: Config, logger: Logger) -> None:
"""
Register the discord notification client.
:param config: The configuration of discord bot.
:type config: Config
:param logger: The logger instance.
:type logger: Logger
"""
token = os.getenv("DISCORD_BOT_TOKEN")
if token is None:
raise ValueError("No discord bot token provided.")
from .bot import DiscordNotification
return DiscordNotification(logger, config, token)
# File: watermelon1024_EEW/notification/linenotify/linenotify.py
import asyncio
from datetime import datetime
from typing import Optional
import aiohttp
from src import EEW, BaseNotificationClient, Config, Logger
LINE_NOTIFY_API = "https://notify-api.line.me/api/notify"
class LineNotifyClient(BaseNotificationClient):
"""
    Represents a LINE Notify EEW notification client.
"""
def __init__(self, logger: Logger, config: Config, notify_token: str) -> None:
"""
        Initialize a new LINE Notify notification client.
:param logger: The logger instance.
:type logger: Logger
:param config: The configuration.
:type config: Config
:param notify_token: The LINE Notify API token.
:type notify_token: str
"""
self.logger = logger
self.config = config
self._notify_token = notify_token
self.response_status: int = None
self._region_intensity: Optional[dict[tuple[str, str], tuple[str, int]]] = None
def get_eew_message(self, eew: EEW):
        # Build and format the EEW message
eq = eew.earthquake
time_str = eq.time.strftime("%H:%M:%S")
        i = eew.serial - 1
title = f"\n速報更新{i}"
content = (
f"\n{time_str} 於 {eq.location.display_name or eq.location},\n發生規模 {eq.mag} 地震,"
f"\n震源深度{eq.depth} 公里,\n最大震度{eq.max_intensity.display},"
"\n慎防強烈搖晃,就近避難 趴下、掩護、穩住!"
)
provider = f"\n(發報單位: {eew.provider.display_name})"
if eew.serial > 1:
_message = f"{title} {content} {provider}"
else:
_message = f"{content} {provider}"
return _message
def get_region_intensity(self, eew: EEW):
        # Get per-region intensities and arrival times
self._region_intensity = {
(city, intensity.region.name): (
intensity.intensity.display,
int(intensity.distance.s_arrival_time.timestamp()),
)
for city, intensity in eew.earthquake.city_max_intensity.items()
if intensity.intensity.value > 0
}
return self._region_intensity
async def _send_region_intensity(self, eew: EEW):
        # Format and send per-region intensities and arrival times
eq = eew.earthquake
await eq._intensity_calculated.wait()
if eq._intensity_calculated.is_set():
self.get_region_intensity(eew)
if self._region_intensity is not None:
current_time = int(datetime.now().timestamp())
if eew.serial <= 1:
region_intensity_message = "\n以下僅供參考\n實際以氣象署公布為準\n各地最大震度|抵達時間:"
for (city, region), (intensity, s_arrival_time) in self._region_intensity.items():
arrival_time = max(s_arrival_time - current_time, 0)
region_intensity_message += f"\n{city} {region}:{intensity}\n剩餘{arrival_time}秒抵達"
else:
region_intensity_message = "\n以下僅供參考\n實際以氣象署公布為準\n各地最大震度更新:"
for (city, region), (intensity, s_arrival_time) in self._region_intensity.items():
region_intensity_message += f"\n{city} {region}:{intensity}"
_headers = {
"Content-Type": "application/x-www-form-urlencoded",
"Authorization": f"Bearer {self._notify_token}",
}
async with aiohttp.ClientSession(headers=_headers) as session:
await self._send_message(session, region_intensity_message=region_intensity_message)
async def _send_image(self, eew: EEW):
        # Send the regional intensity map image
eq = eew.earthquake
try:
await eq._calc_task
if eq.map._drawn:
image = eq.map.save().getvalue()
__headers = {"Authorization": f"Bearer {self._notify_token}"}
async with aiohttp.ClientSession(headers=__headers) as session:
await self._send_message(session, image=image)
except asyncio.CancelledError:
pass
async def _send_message(
self,
session: aiohttp.ClientSession,
image=None,
message: str = None,
region_intensity_message: str = None,
) -> None:
try:
form = aiohttp.FormData()
if message:
form.add_field("message", message)
elif region_intensity_message:
form.add_field("message", region_intensity_message)
if image:
form.add_field("message", "\n各地震度(僅供參考)\n以氣象署公布為準")
form.add_field("imageFile", image)
async with session.post(url=LINE_NOTIFY_API, data=form) as response:
if not response.ok:
raise aiohttp.ClientResponseError(
response.request_info,
status=response.status,
history=response.history,
message=await response.text(),
)
else:
self.response_status = response.status
self.logger.info("Message sent to Line-Notify successfully")
except Exception as e:
self.logger.exception(f"Failed to send message alert to Line-Notify: {e}")
async def start(self) -> None:
"""
The entrypoint for the notification client.
        If this client doesn't need to run in the event loop, just `pass`, since this method is required.
        Note: DO NOT do any blocking calls to run the notification client.
"""
self.logger.info("LINE Notify is ready")
async def send_eew(self, eew: EEW):
"""
        If a new EEW is detected, this method will be called.
Note: This method should not do any blocking calls.
:param eew: The EEW.
:type eew: EEW
"""
headers = {
"Content-Type": "application/x-www-form-urlencoded",
"Authorization": f"Bearer {self._notify_token}",
}
message = self.get_eew_message(eew)
async with aiohttp.ClientSession(headers=headers) as session:
await self._send_message(session, message=message)
await self._send_region_intensity(eew)
asyncio.create_task(self._send_image(eew))
async def update_eew(self, eew: EEW):
"""
If an EEW is updated, this method will be called.
Note: This method should not do any blocking calls.
:param eew: The updated EEW.
:type eew: EEW
"""
headers = {
"Content-Type": "application/x-www-form-urlencoded",
"Authorization": f"Bearer {self._notify_token}",
}
message = self.get_eew_message(eew)
async with aiohttp.ClientSession(headers=headers) as session:
await self._send_message(session, message=message)
await self._send_region_intensity(eew)
asyncio.create_task(self._send_image(eew))
# File: watermelon1024_EEW/notification/linenotify/register.py
import os
from src import Config, Logger
NAMESPACE = "line-notify"
def register(config: Config, logger: Logger) -> None:
"""
Register the linenotify notification client.
Note: DO NOT run or start the client in this function, just register it.
If you want to run it in the event loop, do it in :method:`NotificationClient.run`.
:param config: The configuration of linenotify notification client.
:type config: Config
:param logger: The logger instance.
:type logger: Logger
"""
notify_token = os.getenv("LINENOTIFY_TOKEN")
if notify_token is None:
logger.error(f"{NAMESPACE} line-notify token is not set")
return
from .linenotify import LineNotifyClient
return LineNotifyClient(logger, config, notify_token)
# File: watermelon1024_EEW/src/config.py
from typing import Any, Union
import tomli
class Config:
"""
    The configuration class.
"""
_path: str = "config.toml"
with open(_path, "rb") as f:
_config: dict = tomli.load(f)
@classmethod
def _load_config(cls) -> dict:
"""
Load the configuration file.
This is an internal method and should not be called directly.
:raises TOMLDecodeError: Raised when the config is invalid.
:raises FileNotFoundError: Raised when the file is not found.
:return: The configuration file.
:rtype: dict
"""
with open(cls._path, "rb") as f:
return tomli.load(f)
@classmethod
def get(cls, key: str, default: Any = None) -> Union[str, Any]:
"""
Get a key from the configuration file.
:param key: The key to get.
:type key: str
:param default: The default value if the key is not found.
:type default: Any, optional
:return: The value of the key.
:rtype: str, Any
"""
return cls._config.get(key, default)
@classmethod
def reload(cls) -> None:
"""
Reload the configuration file.
"""
cls._config = cls._load_config()
def __getitem__(self, key: str) -> Any:
"""
Get a key from the configuration file.
:param key: The key to get.
:type key: str
:return: The value of the key.
:rtype: Any
"""
return self._config[key]
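# --- Example (a sketch, not part of the repo): a minimal config.toml with the
# keys referenced elsewhere in this codebase (main.py, client.py). The values
# are illustrative, not official defaults.
import tomli

example = tomli.loads(
    """
debug-mode = false

[log]
retention = "7 days"
format = "{time} | {level} | {message}"

[eew_source]
all = true
"""
)
assert example["debug-mode"] is False
assert example["log"]["retention"] == "7 days"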
# File: watermelon1024_EEW/src/logging.py
import inspect
import logging
import sys
from datetime import timedelta
from typing import Union
from loguru._logger import Core, Logger, _defaults
from .utils import MISSING
class Logging:
"""
Represents a logger class.
"""
def __init__(self, retention: timedelta, debug_mode: bool = False, format: str = MISSING) -> None:
"""
Initialize the logger instance.
"""
level = "DEBUG" if debug_mode else "INFO"
self._logger = Logger(
core=Core(),
exception=None,
depth=0,
record=False,
lazy=False,
colors=False,
raw=False,
capture=True,
patchers=[],
extra={},
)
self._logger.add(
sys.stderr,
level=level,
diagnose=False,
enqueue=True,
format=format or _defaults.LOGURU_FORMAT,
)
self._logger.add(
"./logs/{time:YYYY-MM-DD_HH-mm-ss_SSS}.log",
rotation="00:00",
retention=retention,
encoding="utf-8",
compression="gz",
diagnose=False,
level=level,
enqueue=True,
format=format,
)
self._logger.debug(f"Logger initialized. Debug mode {'enabled' if debug_mode else 'disabled'}.")
def get_logger(self) -> Logger:
"""
The logger instance.
:return: The logger instance.
:rtype: loguru._logger.Logger
"""
return self._logger
class InterceptHandler(logging.Handler):
def __init__(self, logger: Logger) -> None:
super().__init__()
self.logger = logger
def emit(self, record: logging.LogRecord) -> None:
# Get corresponding Loguru level if it exists.
level: Union[str, int]
try:
level = self.logger.level(record.levelname).name
except ValueError:
level = record.levelno
frame, depth = inspect.currentframe(), 0
while frame and (depth == 0 or frame.f_code.co_filename == logging.__file__):
frame = frame.f_back
depth += 1
self.logger.opt(depth=depth, exception=record.exc_info).log(level, record.getMessage())
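# --- Example (a sketch, not part of the repo): routing the standard logging
# module through the loguru-based logger above, so records from third-party
# libraries end up in the same sinks. The format string is illustrative.
if __name__ == "__main__":
    from datetime import timedelta

    logger = Logging(retention=timedelta(days=7), format="{time} | {level} | {message}").get_logger()
    logging.basicConfig(handlers=[InterceptHandler(logger)], level=logging.INFO, force=True)
    logging.getLogger("aiohttp").info("this record is forwarded to loguru")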
# File: watermelon1024_EEW/src/utils.py
from typing import Any
class Missing:
"""
Represents a status of missing.
"""
def __eq__(self, other) -> bool:
return False
def __bool__(self) -> bool:
return False
def __repr__(self) -> str:
return "..."
def __int__(self) -> int:
return 0
def __iter__(self):
return iter([])
MISSING: Any = Missing()
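# --- Example (a sketch, not part of the repo): MISSING is a falsy sentinel that
# lets callers distinguish "not provided" from explicit values such as None or "".
def greet(name: str = MISSING) -> str:
    if name is MISSING:  # identity check; MISSING == anything is always False
        return "hello, stranger"
    return f"hello, {name}"


assert greet() == "hello, stranger"
assert not MISSING and int(MISSING) == 0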
# File: watermelon1024_EEW/src/__init__.py
"""
ExpTech API Client
~~~~~~~~~~~~~~~~~~~
A simple wrapper to connect to ExpTech EEW API
"""
from .client.client import Client
from .client.http import HTTPClient
from .client.websocket import (
AuthorizationFailed,
ExpTechWebSocket,
WebSocketConnectionConfig,
WebSocketEvent,
WebSocketReconnect,
WebSocketService,
)
from .config import Config
from .earthquake.eew import EEW, EarthquakeData, Provider
from .earthquake.location import (
COUNTRY_DATA,
REGIONS,
REGIONS_GROUP_BY_CITY,
TAIWAN_CENTER,
EarthquakeLocation,
Location,
RegionLocation,
)
from .earthquake.map import Map
from .earthquake.model import (
Distance,
Intensity,
RegionExpectedIntensities,
RegionExpectedIntensity,
WaveModel,
calculate_expected_intensity_and_travel_time,
get_wave_model,
round_intensity,
)
from .logging import InterceptHandler, Logger, Logging
from .notification.base import BaseNotificationClient
from .utils import MISSING
# File: watermelon1024_EEW/src/notification/base.py
from abc import ABC
from ..earthquake.eew import EEW
class BaseNotificationClient(ABC):
"""
    An ABC for notification clients.
"""
async def send_eew(self, eew: EEW):
"""Send EEW notification"""
pass
async def update_eew(self, eew: EEW):
"""Update EEW notification"""
pass
async def start(self):
"""Start the notification client in async"""
pass
# File: watermelon1024_EEW/src/notification/template/register.py
"""
Register the [custom notification client].
This module registers the [custom notification client] with the provided configuration and logger.
The `register` function is the entry point for registering the client. It creates an instance of the `CustomNotificationClient` and returns it.
See also: https://github.com/watermelon1024/EEW/blob/main/docs/zh-TW/dev/notification.md#註冊客戶端
"""
from src import Config, Logger
NAMESPACE = "[custom-notification]"
"the configuration namespace for [custom notification client]"
def register(config: Config, logger: Logger) -> None:
"""
Register the [custom notification client].
Note: DO NOT run or start the client in this function, just register it.
If you want to run it in the event loop, do it in :method:`NotificationClient.run`.
:param config: The configuration of [custom notification client].
:type config: Config
:param logger: The logger instance.
:type logger: Logger
"""
from .main import CustomNotificationClient
...
return CustomNotificationClient(logger, config)
# File: watermelon1024_EEW/src/notification/template/main.py
"""
The template for a custom notification client.
Note: This template is out of date, please wait for the next update.
See also: https://github.com/watermelon1024/EEW/blob/main/docs/zh-TW/dev/notification.md#開發客戶端功能
"""
from src import EEW, BaseNotificationClient, Config, Logger
class CustomNotificationClient(BaseNotificationClient):
"""
Represents a [custom] EEW notification client.
"""
def __init__(self, logger: Logger, config: Config) -> None:
"""
Initialize a new [custom] notification client.
:param logger: The logger instance.
:type logger: Logger
:param config: The configuration.
:type config: Config
"""
self.logger = logger
self.config = config
...
async def start(self) -> None:
"""
The entrypoint for the notification client.
If this client doesn't need to run in the event loop, just type `pass` because this method is required.
Note: DO NOT do any blocking calls to run the notification client.
Example:
```py
# Bad
time.sleep(10)
requests.post(...)
# Good
await asyncio.sleep(10)
await aiohttp.request("POST", ...)
```
"""
self.logger.info("Starting [Custom Notification Client]...")
...
async def send_eew(self, eew: EEW):
"""
        If a new EEW is detected, this method will be called.
Note: This method should not do any blocking calls.
:param eew: The EEW.
:type eew: EEW
"""
...
async def update_eew(self, eew: EEW):
"""
If an EEW is updated, this method will be called.
Note: This method should not do any blocking calls.
:param eew: The updated EEW.
:type eew: EEW
"""
...
# File: watermelon1024_EEW/src/client/client.py
import asyncio
import importlib
import os
import re
from collections import defaultdict
from typing import Any, Optional
import aiohttp
from cachetools import TTLCache
from ..config import Config
from ..earthquake.eew import EEW
from ..logging import Logger
from ..notification.base import BaseNotificationClient
from .http import HTTPClient
from .websocket import (
AuthorizationFailed,
ExpTechWebSocket,
WebSocketConnectionConfig,
WebSocketEvent,
WebSocketReconnect,
)
class Client:
"""A client for interacting with ExpTech API."""
config: Config
logger: Logger
debug_mode: bool
__eew_source: Optional[list[str]]
alerts: TTLCache[str, EEW]
notification_client: list[BaseNotificationClient]
_loop: Optional[asyncio.AbstractEventLoop]
_http: HTTPClient
_ws: Optional[ExpTechWebSocket]
websocket_config: WebSocketConnectionConfig
event_handlers: defaultdict[str, list]
__ready: asyncio.Event
_reconnect = True
__closed = False
def __init__(
self,
config: Config,
logger: Logger,
websocket_config: WebSocketConnectionConfig = None,
debug: bool = False,
session: aiohttp.ClientSession = None,
loop: asyncio.AbstractEventLoop = None,
):
self.config = config
self.logger = logger
self.debug_mode = debug
self._loop = loop or asyncio.get_event_loop()
self._http = HTTPClient(logger, debug, session=session, loop=self._loop)
self._ws = None
self.websocket_config = websocket_config
self.alerts = TTLCache(maxsize=float("inf"), ttl=60 * 60) # 1hr
eew_source: dict = config.get("eew_source")
self.__eew_source = (
None if eew_source.get("all") else [source for source, enable in eew_source.items() if enable]
)
self.notification_client = []
self.event_handlers = defaultdict(list)
self.__ready = asyncio.Event()
async def new_alert(self, data: dict):
"""Send a new EEW alert"""
eew = EEW.from_dict(data)
self.alerts[eew.id] = eew
self.logger.info(
"New EEW alert is detected!\n"
"--------------------------------\n"
f" ID: {eew.id} (Serial {eew.serial})\n"
f" Location: {eew.earthquake.location.display_name}({eew.earthquake.lon}, {eew.earthquake.lat})\n"
f"Magnitude: {eew.earthquake.mag}\n"
f" Depth: {eew.earthquake.depth}km\n"
f" Time: {eew.earthquake.time.strftime('%Y/%m/%d %H:%M:%S')}\n"
"--------------------------------"
)
eew.earthquake.calc_all_data_in_executor(self._loop)
# call custom notification client
for client in self.notification_client:
self._loop.create_task(client.send_eew(eew))
async def update_alert(self, data: dict):
"""Update an existing EEW alert"""
eew = EEW.from_dict(data)
old_eew = self.alerts.get(eew.id)
self.alerts[eew.id] = eew
self.logger.info(
"EEW alert updated\n"
"--------------------------------\n"
f" ID: {eew.id} (Serial {eew.serial})\n"
f" Location: {eew.earthquake.location.display_name}({eew.earthquake.lon:.2f}, {eew.earthquake.lat:.2f})\n"
f"Magnitude: {eew.earthquake.mag}\n"
f" Depth: {eew.earthquake.depth}km\n"
f" Time: {eew.earthquake.time.strftime('%Y/%m/%d %H:%M:%S')}\n"
"--------------------------------"
)
if old_eew is not None:
old_eew.earthquake._calc_task.cancel()
eew.earthquake.calc_all_data_in_executor(self._loop)
# call custom notification client
for client in self.notification_client:
self._loop.create_task(client.update_eew(eew))
async def _emit(self, event: str, *args):
for handler in self.event_handlers[event]:
self._loop.create_task(handler(*args))
def add_listener(self, event: WebSocketEvent, handler: Any):
"""Add a listener for a specific event"""
self.event_handlers[event].append(handler)
return self
async def on_eew(self, data: dict):
"""Handle EEW event"""
if self.__eew_source is not None and data["author"] not in self.__eew_source:
# source is None: all source
# source is list: only specified source
return
self.alerts.expire()
eew = self.alerts.get(data["id"])
if eew is None:
await self.new_alert(data)
elif data["serial"] > eew.serial:
await self.update_alert(data)
async def connect(self):
"""Connect to ExpTech API and start receiving data"""
if self.websocket_config:
# await self._http.test_ws_latencies()
# self._http.switch_ws_node("fastest")
await self.ws_connect()
else:
await self._get_eew_loop()
async def ws_connect(self):
"""Connect to WebSocket"""
in_reconnect = False
_reconnect_delay = 0
task: asyncio.Task = None
while not self.__closed:
try:
if not self._ws or self._ws.closed:
self.logger.debug("Connecting to WebSocket...")
self._ws = await self._http.ws_connect(self)
if not self.__ready.is_set():
self.logger.info(
"ExpTech WebSocket is ready\n"
"--------------------------------------------------\n"
f"Subscribed services: {', '.join(self._ws.subscribed_services)}\n"
"--------------------------------------------------"
)
self.__ready.set()
elif in_reconnect:
self.logger.info(
"ExpTech WebSocket successfully reconnect\n"
"--------------------------------------------------\n"
f"Subscribed services: {', '.join(self._ws.subscribed_services)}\n"
"--------------------------------------------------"
)
if task:
task.cancel()
in_reconnect = False
_reconnect_delay = 0
while True:
await self._ws.pool_event()
except AuthorizationFailed:
await self.close()
self.logger.warning("Authorization failed, switching to HTTP client")
self.websocket_config = None
await self.connect()
return
except WebSocketReconnect as e:
if e.reopen and self._ws and not self._ws.closed:
await self._ws.close()
self.logger.exception(f"Attempting a reconnect in {_reconnect_delay}s: {e.reason}")
except Exception as e:
self.logger.exception(
f"An unhandleable error occurred, reconnecting in {_reconnect_delay}s", exc_info=e
)
# use http client while reconnecting
if not task or task.done():
task = self._loop.create_task(self._get_eew_loop())
in_reconnect = True
if _reconnect_delay < 600: # max reconnect delay 10min
_reconnect_delay += 10
await asyncio.sleep(_reconnect_delay)
self._http.switch_ws_node()
async def get_eew(self):
try:
data: list[dict] = await self._http.get("/eq/eew")
except Exception as e:
self.logger.exception("Fail to get eew data.", exc_info=e)
return
for d in data:
await self.on_eew(d)
async def _get_eew_loop(self):
self.logger.info("ExpTech HTTP client is ready")
self.__ready.set()
task: asyncio.Task = None
while True:
try:
if not task or task.done():
task = self._loop.create_task(self.get_eew())
await asyncio.sleep(0.5)
except asyncio.CancelledError:
return
async def close(self):
"""Close the websocket"""
self._reconnect = False
self.__closed = True
if self._ws:
await self._ws.close()
def closed(self):
"""Whether the websocket is closed"""
return self.__closed
async def start(self):
"""
Start the client.
        Note: This coroutine will not return until the user interrupts it.
"""
self.logger.info("Starting ExpTech API Client...")
# test latencies
# await self._http.test_api_latencies()
# self._http.switch_api_node("fastest")
self.add_listener(WebSocketEvent.EEW.value, self.on_eew)
for client in self.notification_client:
self._loop.create_task(client.start())
# TODO: wait until notification client ready
await self.connect()
def run(self):
"""
Start the client.
Note: This is a blocking call. If you want to control your own event loop, use `start` instead.
"""
try:
self._loop.create_task(self.start())
self._loop.run_forever()
except KeyboardInterrupt:
self._loop.run_until_complete(self.close())
self._loop.stop()
finally:
self.logger.info("ExpTech API client has been stopped.")
async def wait_until_ready(self):
"""Wait until the API client is ready"""
await self.__ready.wait()
def load_notification_client(self, path: str, is_module: bool = False):
"""Load a notification client"""
module_path = path + (".register" if is_module else "")
module_name = path.split(".")[-1]
try:
self.logger.debug(f"Importing {module_path}...")
module = importlib.import_module(module_path)
register_func = getattr(module, "register", None)
if register_func is None:
self.logger.debug(
f"Ignoring registering {module_name}: No register function found in {module_path}"
)
return
namespace = getattr(module, "NAMESPACE", module_name)
_config = self.config.get(namespace)
if _config is None:
self.logger.warning(
f"Ignoring registering {module_name}: The expected config namespace '{namespace}' was not found."
)
return
self.logger.debug(f"Registering {module_path}...")
notification_client = register_func(_config, self.logger)
if not issubclass(type(notification_client), BaseNotificationClient):
self.logger.debug(
f"Ignoring registering {module_name}: Unsupported return type '{type(notification_client).__name__}'"
)
return
self.notification_client.append(notification_client)
self.logger.info(f"Registered notification client '{module_name}' successfully")
except ModuleNotFoundError as e:
if e.name == module_path:
self.logger.error(f"Failed to import '{module_name}': '{module_path}' not found")
else:
self.logger.error(
f"Failed to registered '{module_name}' (most likely lacking of dependencies)"
)
except Exception as e:
self.logger.exception(f"Failed to import {module_path}", exc_info=e)
def load_notification_clients(self, path: str):
"""Load all notification clients in the specified directory"""
path_split = re.compile(r"[\\/]")
for _path in os.scandir(path):
if _path.name.startswith("__"):
continue
if _path.is_file() and _path.name.endswith(".py"):
module_path = re.sub(path_split, ".", _path.path)[:-3]
is_module = False
elif _path.is_dir():
module_path = re.sub(path_split, ".", _path.path)
is_module = True
else:
self.logger.debug(f"Ignoring importing unknown file type: {_path.name}")
continue
self.load_notification_client(module_path, is_module=is_module)
# File: watermelon1024_EEW/src/client/websocket.py
import asyncio
import json
from enum import Enum
from typing import TYPE_CHECKING, Any, Optional, Union
import aiohttp
from ..logging import Logger
if TYPE_CHECKING:
from .client import Client
class AuthorizationFailed(Exception):
"""Represents an authorization failure."""
class WebSocketReconnect(Exception):
"""Represents a websocket reconnect signal."""
def __init__(
self, reason: Any = None, reopen: bool = False, source_exc: Exception = None, *args: object
) -> None:
"""Represents a websocket reconnect signal.
:param reason: The reason to reconnect the websocket, defaults to None
:type reason: Any
:param reopen: Whether to reopen the websocket, defaults to False
:type reopen: bool
"""
super().__init__(*args)
self.reason = reason
self.reopen = reopen
self.source_exc = source_exc
class WebSocketClosure(Exception):
"""Represents a websocket closed signal."""
class WebSocketException(Exception):
"""Represents a websocket exception."""
def __init__(self, message: aiohttp.WSMessage, description: str = None, *args: object) -> None:
"""
Represents a websocket exception.
:param message: The websocket message that caused the exception.
:type message: aiohttp.WSMessage
:param description: The description of the exception.
:type description: str
"""
super().__init__(*args)
self.description = description
self.message = message
class WebSocketEvent(Enum):
"""Represent the websocket event"""
EEW = "eew"
INFO = "info"
NTP = "ntp"
REPORT = "report"
RTS = "rts"
RTW = "rtw"
VERIFY = "verify"
CLOSE = "close"
ERROR = "error"
class WebSocketService(Enum):
"""Represent the supported websokcet service"""
REALTIME_STATION = "trem.rts"
"即時地動資料"
REALTIME_WAVE = "trem.rtw"
"即時地動波形圖資料"
EEW = "websocket.eew"
"地震速報資料"
TREM_EEW = "trem.eew"
"TREM 地震速報資料"
REPORT = "websocket.report"
"中央氣象署地震報告資料"
TSUNAMI = "websocket.tsunami"
"中央氣象署海嘯資訊資料"
CWA_INTENSITY = "cwa.intensity"
"中央氣象署震度速報資料"
TREM_INTENSITY = "trem.intensity"
"TREM 震度速報資料"
class WebSocketConnectionConfig:
"""
Represents the configuration for the websocket connection.
"""
def __init__(
self,
key: str,
service: list[WebSocketService],
config: Optional[dict[WebSocketService, list[int]]] = None,
):
"""
:param key: Authentication key
:type key: str
:param service: The services to subscribe
        :type service: list[WebSocketService]
:param config: Configuration for each service, defaults to None
        :type config: Optional[dict[WebSocketService, list[int]]]
"""
self.key = key
self.service = service
self.config = config
def to_dict(self):
return {
"key": self.key,
"service": [service.value for service in self.service],
"config": self.config,
}
# WebSocketAuthenticationInfo = Union[dict[str, Union[int, list[SupportedService]]], dict[str, Union[int, str]]]
class ExpTechWebSocket(aiohttp.ClientWebSocketResponse):
"""
A websocket connection to the ExpTech API.
"""
__client: "Client"
_logger: Logger
config: WebSocketConnectionConfig
subscribed_services: list[Union[WebSocketService, str]]
__wait_until_ready: asyncio.Event
async def debug_receive(self, timeout: float | None = None) -> aiohttp.WSMessage:
msg = await super().receive(timeout)
self._logger.debug(f"Websocket received: {msg}")
return msg
async def debug_send_str(self, data: str, compress: int | None = None) -> None:
self._logger.debug(f"Websocket sending: {data}")
return await super().send_str(data, compress)
@classmethod
async def connect(cls, client: "Client", **kwargs):
"""
Connect to the websocket.
"""
self: cls = await client._http._session.ws_connect(client._http._current_ws_node, **kwargs)
self.__client = client
self._logger = client.logger
self.config = client.websocket_config
self.subscribed_services = []
if client.debug_mode:
self.receive = self.debug_receive
self.send_str = self.debug_send_str
self.__wait_until_ready = asyncio.Event()
await self.verify()
# while not self.__wait_until_ready.is_set():
# await self.pool_event()
return self
async def send_verify(self):
"""
Send the verify data to the websocket.
"""
data = self.config.to_dict()
data["type"] = "start"
await self.send_json(data)
async def verify(self):
"""
Verify the websocket connection.
        :return: the subscribed services.
        :rtype: list[WebSocketService]
"""
await self.send_verify()
data = await asyncio.wait_for(self.wait_for_verify(), timeout=60)
self.subscribed_services = data["list"]
self.__wait_until_ready.set()
return self.subscribed_services
async def wait_for_verify(self):
"""
        Return the websocket message data if verification succeeds
:return: The data of the verify result
:rtype: dict
:raise AuthorizationFailed: If the API key is invalid.
        :raise WebSocketReconnect: If the API key is already in use.
:raise WebSocketClosure: If the websocket is closed.
"""
while True:
msg = await self.receive_and_check()
data = json.loads(msg.data)
if data.get("type") == WebSocketEvent.VERIFY.value:
await self.send_verify()
if data.get("type") != WebSocketEvent.INFO.value:
continue
data = data["data"]
message = data.get("message")
code = data.get("code")
if code == 200:
# subscribe successfully
return data
elif code == 400:
                # api key already in use
                raise WebSocketReconnect("API key is already in use", reopen=True)
elif code == 401:
# no api key or invalid api key
raise AuthorizationFailed(message)
elif code == 403:
# vip membership expired
raise AuthorizationFailed(message)
elif code == 429:
raise WebSocketReconnect("Rate limit exceeded", reopen=True)
async def wait_until_ready(self):
"""Wait until websocket client is ready"""
await self.__wait_until_ready.wait()
async def receive_and_check(self):
"""
        Receive a message and check that it is handleable.
:return: A handleable message.
:rtype: aiohttp.WSMessage
:raise WebSocketException: If the message is not handleable.
:raise WebSocketClosure: If the websocket is closed.
"""
msg = await self.receive(timeout=90)
if msg.type is aiohttp.WSMsgType.TEXT:
return msg
elif msg.type is aiohttp.WSMsgType.BINARY:
return msg
elif msg.type is aiohttp.WSMsgType.ERROR:
raise WebSocketException(msg)
elif msg.type in (aiohttp.WSMsgType.CLOSED, aiohttp.WSMsgType.CLOSING, aiohttp.WSMsgType.CLOSE):
raise WebSocketClosure
else:
raise WebSocketException(msg, "Websocket received unhandleable message")
async def _handle(self, msg: aiohttp.WSMessage):
if msg.type is aiohttp.WSMsgType.TEXT:
await self._handle_json(json.loads(msg.data))
elif msg.type is aiohttp.WSMsgType.BINARY:
await self._handle_binary(msg.data)
async def _handle_binary(self, data: bytes):
pass
async def _handle_json(self, data: dict):
event_type = data.get("type")
if event_type == WebSocketEvent.VERIFY.value:
await self.verify()
elif event_type == WebSocketEvent.INFO.value:
data_ = data.get("data", {})
code = data_.get("code")
if code == 503:
await asyncio.sleep(5)
await self.verify()
else:
await self._emit(WebSocketEvent.INFO.value, data_)
elif event_type == "data":
time = data.get("time")
data_ = data.get("data", {})
data_["time"] = time
data_type = data_.get("type")
if data_type:
await self._emit(data_type, data_)
elif event_type == WebSocketEvent.NTP.value:
await self._emit(WebSocketEvent.NTP.value, data)
@property
def _emit(self):
return self.__client._emit
async def pool_event(self):
try:
msg = await self.receive_and_check()
await self._handle(msg)
except WebSocketReconnect:
raise
except WebSocketClosure as e:
raise WebSocketReconnect("Websocket closed", reopen=True, source_exc=e) from e
except asyncio.TimeoutError as e:
raise WebSocketReconnect("Websocket message received timeout", reopen=False) from e
except WebSocketException as e:
self._logger.error(f"Websocket received an error: {e.description or e.message.data}", exc_info=e)
# File: watermelon1024_EEW/src/client/http.py
import asyncio
import random
import time
from typing import TYPE_CHECKING
import aiohttp
from ..logging import Logger
from .websocket import ExpTechWebSocket
if TYPE_CHECKING:
from .client import Client
class HTTPClient:
"""A HTTP client for interacting with ExpTech API."""
def __init__(
self,
logger: Logger,
debug: bool,
*,
domain: str = "exptech.dev",
api_version: int = 1,
session: aiohttp.ClientSession = None,
loop: asyncio.AbstractEventLoop = None,
):
self._logger = logger
self._debug_mode = debug
self.DOMAIN = domain
self.__API_VERSION = api_version
self.API_NODES = [
*(f"https://api-{i}.{self.DOMAIN}/api/v{api_version}" for i in range(1, 3)), # api-1 ~ api-2
*(f"https://lb-{i}.{self.DOMAIN}/api/v{api_version}" for i in range(1, 5)), # lb-1 ~ lb-4
]
self.__base_url = self.API_NODES[0]
self.node_latencies = [(node, float("inf")) for node in self.API_NODES]
self.__current_node_index = 0
self.WS_NODES = [f"wss://lb-{i}.{self.DOMAIN}/websocket" for i in range(1, 5)] # lb-1 ~ lb-4
self._current_ws_node = self.WS_NODES[0]
self.ws_node_latencies = [(node, float("inf")) for node in self.WS_NODES]
self._current_ws_node_index = 0
self._loop = loop or asyncio.get_event_loop()
self._session = session or aiohttp.ClientSession(
loop=self._loop,
headers={"User-Agent": "EEW/1.0.0 (https://github.com/watermelon1024/EEW)"},
)
self._session._ws_response_class = ExpTechWebSocket
# http api node
async def _test_latency(self, url: str) -> float:
try:
start = time.time()
async with self._session.get(url) as response:
if response.ok:
latency = time.time() - start
return latency
else:
return float("inf")
except Exception:
return float("inf")
async def test_api_latencies(self):
"""Test all API nodes latencies"""
latencies = [(node, await self._test_latency(f"{node}/eq/eew")) for node in self.API_NODES]
latencies.sort(key=lambda x: x[1])
self.node_latencies = latencies
return latencies
def switch_api_node(self, type_or_url: str = "next"):
"""
Switch the API node.
:param type_or_url: The type or url of the API node. Type supports `next`, `fastest` and `random`.
:type type_or_url: str
"""
if type_or_url == "next":
idx = (self.__current_node_index + 1) % len(self.node_latencies)
elif type_or_url == "fastest":
idx = 0
elif type_or_url == "random":
idx = random.randint(0, len(self.node_latencies) - 1)
else:
idx = None
if idx is None:
url = type_or_url
else:
url = self.node_latencies[idx][0]
self.__current_node_index = idx
self.__base_url = url
self._logger.info(f"Switched to API node: {url}")
async def request(self, method: str, path: str, *, json: bool = True, retry: int = 0, **kwargs):
"""
Make a request to the API.
:param method: The HTTP method to use.
:type method: str
:param path: The path to request.
:type path: str
:param json: Whether to return the response as JSON.
:type json: bool
:param retry: The number of retries if the request fails.
:type retry: int
:param kwargs: Additional keyword arguments to pass to the request.
:type kwargs: dict
:return: The response from the API.
:rtype: str | dict | Any
"""
url = self.__base_url + path
        try:
            async with self._session.request(method, url, **kwargs) as r:
                try:
                    resp = await r.json() if json else await r.text()
                except aiohttp.ContentTypeError:
                    # Read the body while the connection is still open; once the
                    # `async with` block exits the payload can no longer be read.
                    body = await r.text()
                    self._logger.debug(
                        f"Fail to decode JSON when {method} {url} (receive {r.status}): {body}"
                    )
                    raise
                self._logger.debug(f"{method} {url} receive {r.status}: {resp}")
                return resp
        except Exception as e:
            if not isinstance(e, aiohttp.ContentTypeError):
                self._logger.debug(f"Fail to {method} {url}: {e}")
            self.switch_api_node()
            if retry > 0:
                await asyncio.sleep(1)
                return await self.request(method, path, json=json, retry=retry - 1, **kwargs)
            raise
async def get(self, path: str, retry: int = 0, **kwargs):
"""
Make a GET request to the API.
:param path: The path to request.
:type path: str
:param retry: The number of retries if the request fails.
:type retry: int
:param kwargs: Additional keyword arguments to pass to the request.
:type kwargs: dict
:return: The response from the API.
:rtype: str | dict | Any
"""
return await self.request("GET", path, retry=retry, **kwargs)
async def post(self, path: str, data: dict, retry: int = 0, **kwargs):
"""
Make a POST request to the API.
:param path: The path to request.
:type path: str
:param data: The data to send in the request body.
:type data: dict
:param retry: The number of retries if the request fails.
:type retry: int
:param kwargs: Additional keyword arguments to pass to the request.
:type kwargs: dict
:return: The response from the API.
:rtype: str | dict | Any
"""
return await self.request("POST", path, data=data, retry=retry, **kwargs)
# websocket node
async def _test_ws_latency(self, url: str) -> float:
try:
async with self._session.ws_connect(url) as ws:
await ws.receive(timeout=5) # discard first ntp
start_time = time.time()
await ws.send_json({"type": "start"})
await ws.receive()
latency = time.time() - start_time
return latency
except Exception:
return float("inf")
async def test_ws_latencies(self):
"""Test all websocket nodes latencies"""
latencies = [(node, await self._test_ws_latency(node)) for node in self.WS_NODES]
latencies.sort(key=lambda x: x[1])
self.ws_node_latencies = latencies
return latencies
def switch_ws_node(self, type_or_url: str = "next"):
"""
Switch the websocket node.
:param type_or_url: The type or url of the websocket node. Type supports `next`, `fastest` and `random`.
:type type_or_url: str
"""
if type_or_url == "next":
idx = (self._current_ws_node_index + 1) % len(self.ws_node_latencies)
elif type_or_url == "fastest":
idx = 0
elif type_or_url == "random":
idx = random.randint(0, len(self.ws_node_latencies) - 1)
else:
idx = None
if idx is None:
url = type_or_url
else:
url = self.ws_node_latencies[idx][0]
self._current_ws_node_index = idx
self._current_ws_node = url
self._logger.info(f"Switched to websocket node: {url}")
async def ws_connect(self, client: "Client"):
"""
Connect to the websocket.
"""
if not self._current_ws_node:
self._current_ws_node = self.WS_NODES[0]
return await ExpTechWebSocket.connect(client)
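# Usage sketch (illustrative addition): the intended request flow — rank the
# nodes once, start from the fastest, and lean on the retry/failover built into
# `request`. `_example_fetch_eew` is a demonstration helper, not part of the API.
async def _example_fetch_eew(http: HTTPClient):
    await http.test_api_latencies()  # rank API nodes by measured latency
    http.switch_api_node("fastest")  # begin with the best-ranked node
    # retry=1 allows one automatic node switch before the error propagates
    return await http.get("/eq/eew", retry=1)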
| 7,733 | Python | .py | 189 | 30.920635 | 112 | 0.567722 | watermelon1024/EEW | 8 | 4 | 0 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,818 | location.py | watermelon1024_EEW/src/earthquake/location.py | import json
from typing import Union
import geopandas as gpd
from ..utils import MISSING
class Location:
"""
    A base class that represents a location with longitude and latitude.
"""
__slots__ = ("_longitude", "_latitude")
def __init__(self, longitude: float, latitude: float):
"""
Initialize a Location object.
:param longitude: The longitude of the location.
:type longitude: float
:param latitude: The latitude of the location.
:type latitude: float
"""
self._longitude = longitude
self._latitude = latitude
@property
def lon(self):
"""The longitude of the location."""
return self._longitude
@property
def lat(self):
"""The latitude of the location."""
return self._latitude
def __str__(self):
return f"({self._longitude}, {self._latitude})"
def __repr__(self):
return f"Location({self._longitude}, {self._latitude})"
def __iter__(self):
return iter((self._longitude, self._latitude))
def __eq__(self, other):
return (
            isinstance(other, Location)
and self._longitude == other._longitude
and self._latitude == other._latitude
)
def __hash__(self):
return hash((self._longitude, self._latitude))
def to_dict(self) -> dict[str, float]:
"""
Return the location as a dictionary.
:return: The location as a dictionary.
:rtype: dict[str, float]
"""
return {"longitude": self._longitude, "latitude": self._latitude}
class EarthquakeLocation(Location):
"""
    Represents an earthquake location.
"""
__slots__ = ("_longitude", "_latitude", "_display_name")
def __init__(self, longitude: float, latitude: float, display: str = MISSING):
"""
        Initialize an earthquake location object.
:param longitude: The longitude of the location.
:type longitude: float
:param latitude: The latitude of the location.
:type latitude: float
:param display: The display name of the location.
:type display: str
"""
super().__init__(longitude, latitude)
self._display_name = display
@property
def display_name(self):
"""The display name of the location."""
return self._display_name
class RegionLocation(Location):
"""
Represents a region with longitude, latitude, region code and name.
"""
__slots__ = ("_longitude", "_latitude", "_code", "_name", "_city", "_area", "_site_effect")
def __init__(
self,
longitude: float,
latitude: float,
code: int,
name: str = MISSING,
city: str = MISSING,
area: str = MISSING,
site_effect: float = MISSING,
):
"""
Initialize the region object.
:param longitude: The longitude of the region.
:type longitude: float
:param latitude: The latitude of the region.
:type latitude: float
:param code: The identifier of the region.
:type code: int
:param name: The name of the region.
:type name: str
:param city: The city of the region.
:type city: str
:param area: The area of the region.
:type area: str
:param site_effect: The site effect of the region.
:type site_effect: float
"""
super().__init__(longitude, latitude)
self._code = code
self._name = name
self._city = city
self._area = area
self._site_effect = site_effect
@property
def code(self):
"""The identifier of the location."""
return self._code
@property
def name(self):
"""The name of the location."""
return self._name
@property
def city(self):
"""The city of the location."""
return self._city
@property
def area(self):
"""The area of the location."""
return self._area
    @property
    def site_effect(self):
        """The site effect of the location."""
        return self._site_effect
    # Alias preserving the original, misspelled name for older callers.
    side_effect = site_effect
def __str__(self):
return f"{self._name}({self._longitude}, {self._latitude})"
def __repr__(self):
return f"RegionLocation({self._name} at ({self._longitude}, {self._latitude})"
def _parse_region_dict(
data: dict[str, dict[str, dict[str, Union[int, float, str]]]]
) -> dict[int, RegionLocation]:
all_regions = {}
for city, regions in data.items():
for name, d in regions.items():
all_regions[d["code"]] = RegionLocation(
d["lon"], d["lat"], d["code"], name, city, d.get("area"), d.get("site")
)
return all_regions
def _group_region_by_city(regions: dict[int, RegionLocation]) -> dict[str, list[RegionLocation]]:
grouped_regions: dict[str, list[RegionLocation]] = {}
for region in regions.values():
grouped_regions.setdefault(region.city, []).append(region)
return grouped_regions
TAIWAN_CENTER = Location(120.982025, 23.973875)
"The center of Taiwan"
with open("asset/region.json", "r", encoding="utf-8") as f:
REGIONS: dict[int, RegionLocation] = _parse_region_dict(json.load(f))
REGIONS_GROUP_BY_CITY: dict[str, list[RegionLocation]] = _group_region_by_city(REGIONS)
with open("asset/town_map.json", "r", encoding="utf-8") as f:
_raw_geo_data = json.load(f)["features"]
TOWN_DATA: gpd.GeoDataFrame = gpd.GeoDataFrame.from_features(_raw_geo_data)
TOWN_RANGE = {
int(d["id"]): TOWN_DATA[TOWN_DATA["TOWNCODE"] == d["properties"]["TOWNCODE"]]
for d in _raw_geo_data
if d["id"].isdigit()
}
with open("asset/country_map.json", "r", encoding="utf-8") as f:
_raw_geo_data = json.load(f)["features"]
COUNTRY_DATA: gpd.GeoDataFrame = gpd.GeoDataFrame.from_features(_raw_geo_data)
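# Usage sketch (illustrative addition): how the lookup tables above combine.
# `REGIONS` maps a region code to its `RegionLocation`, and `TOWN_RANGE` maps the
# same code to the town polygon used for drawing. The code 100 is a made-up value.
def _example_region_lookup(code: int = 100) -> None:
    region = REGIONS.get(code)
    if region is None:
        return
    print(f"{region.city} {region.name} at ({region.lon}, {region.lat})")
    town = TOWN_RANGE.get(code)  # GeoDataFrame with the town boundary, if known
    if town is not None:
        print(town.total_bounds)  # (min_lon, min_lat, max_lon, max_lat)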
| 6,138 | Python | .py | 170 | 28.8 | 97 | 0.607022 | watermelon1024/EEW | 8 | 4 | 0 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,819 | eew.py | watermelon1024_EEW/src/earthquake/eew.py | import asyncio
from datetime import datetime
from ..utils import MISSING
from .location import REGIONS_GROUP_BY_CITY, EarthquakeLocation, RegionLocation
from .map import Map
from .model import (
Intensity,
RegionExpectedIntensity,
WaveModel,
calculate_expected_intensity_and_travel_time,
get_wave_model,
)
PROVIDER_DISPLAY = {
"cwa": "中央氣象署",
"trem": "TREM 臺灣即時地震監測",
"kam": "기상청 날씨누리",
"jma": "気象庁",
"nied": "防災科研",
"scdzj": "四川省地震局",
}
class EarthquakeData:
"""
Represents the data of an earthquake.
"""
__slots__ = (
"_location",
"_magnitude",
"_depth",
"_time",
"_max_intensity",
"_model",
"_calc_task",
"_city_max_intensity",
"_expected_intensity",
"_p_arrival_distance_interp_func",
"_s_arrival_distance_interp_func",
"_map",
"_intensity_calculated",
)
def __init__(
self,
location: EarthquakeLocation,
magnitude: float,
depth: int,
time: datetime,
max_intensity: Intensity = MISSING,
) -> None:
"""
Initialize an earthquake data object.
:param location: The location of the earthquake.
:type location: EarthquakeLocation
:param magnitude: The magnitude of the earthquake.
:type magnitude: float
:param depth: The depth of the earthquake in km.
:type depth: int
:param time: The time when earthquake happened.
:type time: datetime
:param max_intensity: The maximum intensity of the earthquake.
:type max_intensity: Intensity
"""
self._location = location
self._magnitude = magnitude
self._depth = depth
self._time = time
self._max_intensity = max_intensity
self._model = get_wave_model(depth)
self._intensity_calculated = asyncio.Event()
self._calc_task: asyncio.Future = None
self._city_max_intensity: dict[str, RegionExpectedIntensity] = None
self._expected_intensity: dict[int, RegionExpectedIntensity] = None
self._map: Map = Map(self)
@property
def location(self) -> EarthquakeLocation:
"""
The location object of the earthquake.
"""
return self._location
@property
def lon(self) -> float:
"""
The longitude of the earthquake.
"""
return self._location.lon
@property
def lat(self) -> float:
"""
The latitude of the earthquake.
"""
return self._location.lat
@property
def mag(self) -> float:
"""
The magnitude of the earthquake.
"""
return self._magnitude
@property
def depth(self) -> int:
"""
The depth of the earthquake in km.
"""
return self._depth
@property
def time(self) -> datetime:
"""
The time when earthquake happened.
"""
return self._time
@property
def max_intensity(self) -> Intensity:
"""
The maximum intensity of the earthquake.
"""
return self._max_intensity
@property
def wave_model(self) -> WaveModel:
"""
The wave model of the earthquake.
"""
return self._model
@property
def expected_intensity(self) -> dict[int, RegionExpectedIntensity]:
"""
        The expected intensity of the earthquake (if it has been calculated).
"""
return self._expected_intensity
@property
def city_max_intensity(self) -> dict[str, RegionExpectedIntensity]:
"""
        The maximum intensity of the earthquake in each city (if it has been calculated).
"""
return self._city_max_intensity
@property
def map(self) -> Map:
"""
        The intensity map object of the earthquake (if it has been calculated).
"""
return self._map
@classmethod
def from_dict(cls, data: dict) -> "EarthquakeData":
"""
Create an earthquake data object from the dictionary.
:param data: The data of the earthquake from the api.
:type data: dict
:return: The earthquake data object.
:rtype: EarthquakeData
"""
return cls(
location=EarthquakeLocation(data["lon"], data["lat"], data.get("loc", MISSING)),
magnitude=data["mag"],
depth=data["depth"],
time=datetime.fromtimestamp(data["time"] / 1000),
max_intensity=Intensity(i) if (i := data.get("max")) is not None else MISSING,
)
def calc_expected_intensity(
self, regions: list[RegionLocation] = MISSING
) -> dict[int, RegionExpectedIntensity]:
"""
Calculate the expected intensity of the earthquake.
"""
intensities = calculate_expected_intensity_and_travel_time(self, regions)
self._expected_intensity = dict(intensities)
self._city_max_intensity = {
city: max(city_intensities, key=lambda x: x.intensity._float_value)
for city, regions in REGIONS_GROUP_BY_CITY.items()
if (
city_intensities := [
intensity
for region in regions
if (intensity := self._expected_intensity.get(region.code))
]
)
}
self._intensity_calculated.set()
return self._expected_intensity
    def calc_all_data(self):
        try:
            self._intensity_calculated.clear()
            self.calc_expected_intensity()
            self.map.draw()
        except asyncio.CancelledError:
            # The calculation was cancelled before the map was finished.
            self._map._drawn = False
        except Exception:
            # Cancel the wrapping future so callers awaiting it are released.
            self._calc_task.cancel()
def calc_all_data_in_executor(self, loop: asyncio.AbstractEventLoop):
if self._calc_task is None:
self._calc_task = loop.run_in_executor(None, self.calc_all_data)
return self._calc_task
async def wait_until_intensity_calculated(self):
await self._calc_task
class Provider:
"""
Represents the data of an EEW provider.
"""
__slots__ = ("_name",)
def __init__(self, name: str) -> None:
"""
Initialize an EEW provider data object.
:param name: The name of the provider.
:type name: str
"""
self._name = name
@property
def name(self) -> str:
"""
The name of the provider.
"""
return self._name
@property
def display_name(self) -> str:
"""
The display name of the provider.
"""
return PROVIDER_DISPLAY.get(self._name, self._name)
class EEW:
"""
Represents an earthquake early warning event.
"""
__slots__ = ("_id", "_serial", "_final", "_earthquake", "_provider", "_time")
def __init__(
self,
id: str,
serial: int,
final: bool,
earthquake: EarthquakeData,
provider: Provider,
time: datetime,
) -> None:
"""
Initialize an earthquake early warning event.
:param id: The identifier of the EEW.
:type id: str
:param serial: The serial of the EEW.
:type serial: int
:param final: Whether the EEW is final report.
:type final: bool
:param earthquake: The data of the earthquake.
:type earthquake: EarthquakeData
:param provider: The provider of the EEW.
:type provider: Provider
:param time: The time when the EEW published.
:type time: datetime
"""
self._id = id
self._serial = serial
self._final = final
self._earthquake = earthquake
self._provider = provider
self._time = time
@property
def id(self) -> str:
"""
The identifier of the EEW.
"""
return self._id
@property
def serial(self) -> int:
"""
The serial of the EEW.
"""
return self._serial
@property
def final(self) -> bool:
"""
Whether the EEW is final report.
"""
return self._final
@property
def earthquake(self) -> EarthquakeData:
"""
The earthquake data of the EEW.
"""
return self._earthquake
@property
def provider(self) -> Provider:
"""
The provider of the EEW.
"""
return self._provider
@property
def time(self) -> datetime:
"""
The datetime object of the EEW.
"""
return self._time
@classmethod
def from_dict(cls, data: dict) -> "EEW":
"""
Create an EEW object from the data dictionary.
:param data: The data of the earthquake from the api.
:type data: dict
:return: The EEW object.
:rtype: EEW
"""
return cls(
id=data["id"],
serial=data["serial"],
final=bool(data["final"]),
earthquake=EarthquakeData.from_dict(data=data["eq"]),
provider=Provider(data["author"]),
time=datetime.fromtimestamp(data["time"] / 1000),
)
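# Usage sketch (illustrative addition): how an API payload becomes an `EEW`
# object. The dictionary mirrors the fields read by `from_dict`; all values are
# invented sample data.
def _example_parse_eew() -> EEW:
    payload = {
        "id": "1130699",
        "serial": 1,
        "final": 0,
        "author": "cwa",
        "time": 1700000000000,  # milliseconds since the epoch
        "eq": {
            "lat": 24.23,
            "lon": 122.16,
            "depth": 40,
            "mag": 6.9,
            "time": 1700000000000,
            "max": 5,
            "loc": "花蓮縣外海",
        },
    }
    return EEW.from_dict(payload)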
| 9,350 | Python | .py | 304 | 22.236842 | 92 | 0.575425 | watermelon1024/EEW | 8 | 4 | 0 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,820 | map.py | watermelon1024_EEW/src/earthquake/map.py | import io
from typing import TYPE_CHECKING
import matplotlib.pyplot as plt
if TYPE_CHECKING:
from earthquake.eew import EarthquakeData
import warnings
import matplotlib.image as mpimg
from matplotlib.offsetbox import AnnotationBbox, OffsetImage
from .location import COUNTRY_DATA, TAIWAN_CENTER, TOWN_DATA, TOWN_RANGE
plt.ioff()
plt.switch_backend("AGG")
P_WAVE_COLOR = "orange"
S_WAVE_COLOR = "red"
INTENSITY_COLOR: dict[int, str] = {
0: None,
1: "#387FFF",
2: "#244FD0",
3: "#35BF56",
4: "#F8F755",
5: "#FFC759",
6: "#FF9935",
7: "#DF443B",
8: "#7B170F",
9: "#7237C1",
}
legend_img = mpimg.imread("asset/map_legend.png")
legend_offset = OffsetImage(legend_img, zoom=0.5)
class Map:
"""
Represents the map for earthquake.
"""
__slots__ = ("_eq", "_image", "fig", "ax", "_drawn", "p_wave", "s_wave")
def __init__(self, earthquake: "EarthquakeData"):
"""
Initialize the map.
        :param earthquake: The earthquake data used to render the map.
        :type earthquake: EarthquakeData
"""
self._eq = earthquake
self._image = None
self._drawn: bool = False
"Whether the map has been drawn"
self.fig: plt.Figure = None
"The figure object of the map"
self.ax: plt.Axes = None
"The axes of the figure"
self.p_wave: plt.Circle = None
"The p-wave of the earthquake"
self.s_wave: plt.Circle = None
"The s-wave of the earthquake"
def init_figure(self):
"""
Initialize the figure of the map.
"""
self.fig, self.ax = plt.subplots(figsize=(4, 6))
self.fig.patch.set_alpha(0)
self.ax.set_axis_off()
@property
def image(self) -> io.BytesIO:
"""
The map image of the earthquake.
"""
return self._image
def draw(self):
"""
Draw the map of the earthquake if intensity have been calculated.
"""
if self._eq._expected_intensity is None:
raise RuntimeError("Intensity have not been calculated yet.")
if self.fig is None:
self.init_figure()
# map boundary
zoom = 1 # TODO: change zoom according to magnitude
mid_lon, mid_lat = (TAIWAN_CENTER.lon + self._eq.lon) / 2, (TAIWAN_CENTER.lat + self._eq.lat) / 2
lon_boundary, lat_boundary = 1.6 * zoom, 2.4 * zoom
min_lon, max_lon = mid_lon - lon_boundary, mid_lon + lon_boundary
min_lat, max_lat = mid_lat - lat_boundary, mid_lat + lat_boundary
self.ax.set_xlim(min_lon, max_lon)
self.ax.set_ylim(min_lat, max_lat)
TOWN_DATA.plot(ax=self.ax, facecolor="lightgrey", edgecolor="black", linewidth=0.22 / zoom)
for code, region in self._eq._expected_intensity.items():
if region.intensity.value > 0:
TOWN_RANGE[code].plot(ax=self.ax, color=INTENSITY_COLOR[region.intensity.value])
COUNTRY_DATA.plot(ax=self.ax, edgecolor="black", facecolor="none", linewidth=0.64 / zoom)
# draw epicenter
self.ax.scatter(
self._eq.lon,
self._eq.lat,
marker="x",
color="red",
s=160 / zoom,
linewidths=2.5 / zoom,
)
# add legend
if self._eq.lon > TAIWAN_CENTER.lon:
x = 1
align = 0.8
else:
x = 0
align = 0.2
self.ax.add_artist(
AnnotationBbox(
OffsetImage(legend_img, zoom=0.5),
(x, 0),
xycoords="axes fraction",
boxcoords="axes fraction",
box_alignment=(align, 0.2),
frameon=False,
)
)
self._drawn = True
def draw_wave(self, time: float, waves: str = "all"):
"""
Draw the P and S wave if possible.
:param time: the travel time in seconds of the wave to draw
:type time: float
:param waves: type of the wave to draw, can be `P`, `S` or `all` (case-insensitive), defaults to `all`
:type waves: str
"""
if not self._drawn:
warnings.warn("Map have not been drawn yet, background will be empty.")
waves = waves.lower()
if waves == "all":
waves = "ps"
p_dis, s_dis = self._eq._model.get_arrival_distance(time)
if "p" in waves:
if self.p_wave is not None:
self.p_wave.remove()
self.p_wave = plt.Circle(
(self._eq.lon, self._eq.lat),
p_dis,
color=P_WAVE_COLOR,
fill=False,
linewidth=1.5,
)
self.ax.add_patch(self.p_wave)
if "s" in waves:
if self.s_wave is not None:
self.s_wave.remove()
self.s_wave = plt.Circle(
(self._eq.lon, self._eq.lat),
s_dis,
color=S_WAVE_COLOR,
fill=False,
linewidth=1.5,
)
self.ax.add_patch(self.s_wave)
def save(self):
if self.fig is None:
raise RuntimeError("Map have not been initialized yet.")
if not self._drawn:
warnings.warn("Map have not been drawn yet, it will be empty.")
_map = io.BytesIO()
self.fig.savefig(_map, format="png", bbox_inches="tight")
_map.seek(0)
self._image = _map
return self._image
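# Usage sketch (illustrative addition): the intended rendering order — expected
# intensities first (a precondition of `draw`), then optional wave fronts, then
# `save` for the PNG buffer. `_example_render` is a demonstration helper only.
def _example_render(eq: "EarthquakeData") -> io.BytesIO:
    m = Map(eq)
    m.draw()  # raises RuntimeError if eq's intensities were not calculated
    m.draw_wave(time=10)  # P/S wave fronts 10 s after the origin time
    return m.save()  # PNG bytes, also kept in m.image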
| 5,515 | Python | .py | 157 | 25.745223 | 110 | 0.553618 | watermelon1024/EEW | 8 | 4 | 0 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,821 | model.py | watermelon1024_EEW/src/earthquake/model.py | """
Earthquake expected data calculator.
Reference: https://github.com/ExpTechTW/TREM-tauri/blob/main/src/scripts/helper/utils.ts
"""
import math
from datetime import datetime, timedelta
from typing import TYPE_CHECKING, OrderedDict
import numpy as np
from obspy.taup import tau
from scipy.interpolate import interp1d
from ..utils import MISSING
from .location import REGIONS, Location, RegionLocation
if TYPE_CHECKING:
from .eew import EarthquakeData
EARTH_RADIUS = 6371.008
INTENSITY_DISPLAY: dict[int, str] = {
0: "0級",
1: "1級",
2: "2級",
3: "3級",
4: "4級",
5: "5弱",
6: "5強",
7: "6弱",
8: "6強",
9: "7級",
}
SEISMIC_MODEL = tau.TauPyModel(cache=OrderedDict())
wave_model_cache: dict[int, "WaveModel"] = {}
class WaveModel:
"""
Represents a P and S waves model.
"""
def __init__(self, distance: np.ndarray, p_time: np.ndarray, s_time: np.ndarray) -> None:
"""
Initialize the travel time model.
        Note: Do not create this class directly; use the :func:`get_wave_model` factory instead.
:param distance: The distance array in degrees.
:type distance: np.ndarray
:param p_time: The P wave travel time array in seconds.
:type p_time: np.ndarray
:param s_time: The S wave travel time array in seconds.
:type s_time: np.ndarray
"""
distance_in_radians = np.radians(distance)
self._p_arrival_distance_interp_func = interp1d(
p_time, distance, bounds_error=False, fill_value="extrapolate"
)
self._s_arrival_distance_interp_func = interp1d(
s_time, distance, bounds_error=False, fill_value="extrapolate"
)
self._p_travel_time_interp_func = interp1d(
distance_in_radians, p_time, bounds_error=False, fill_value="extrapolate"
)
self._s_travel_time_interp_func = interp1d(
distance_in_radians, s_time, bounds_error=False, fill_value="extrapolate"
)
def get_travel_time(self, distance: float) -> tuple[float, float]:
"""
Get the P and S waves travel time of the earthquake in seconds.
:param distance: The distance in radians.
:type distance: float
:return: P and S waves travel time in seconds.
:rtype: tuple[float, float]
"""
return (
float(self._p_travel_time_interp_func(distance)),
float(self._s_travel_time_interp_func(distance)),
)
def get_arrival_distance(self, time: float) -> tuple[float, float]:
"""
Get the P and S waves arrival distances of the earthquake in degrees.
:param time: The travel time in seconds.
:type time: float
:return: P and S waves arrival distances in degrees.
:rtype: tuple[float, float]
"""
return (
max(float(self._p_arrival_distance_interp_func(time)), 0),
max(float(self._s_arrival_distance_interp_func(time)), 0),
)
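# Usage sketch (illustrative addition): `WaveModel` interpolates in both
# directions — travel times from a distance in radians, and arrival distances in
# degrees from an elapsed time. The 40 km depth is an arbitrary sample value.
def _example_wave_model() -> None:
    model = get_wave_model(40)  # cached per depth, see below
    p_time, s_time = model.get_travel_time(math.radians(0.5))
    p_deg, s_deg = model.get_arrival_distance(10.0)
    print(f"P {p_time:.1f}s / S {s_time:.1f}s; after 10 s: P {p_deg:.2f} deg, S {s_deg:.2f} deg")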
def get_wave_model(depth: float) -> WaveModel:
"""
Get the wave model for the given depth.
:param depth: The depth in kilometers.
:type depth: float
:return: The wave model.
:rtype: WaveModel
"""
cache = wave_model_cache.get(depth)
if cache is not None:
return cache
deg = []
p_time = []
s_time = []
for i in np.arange(0, 1, 0.01):
arrivals = SEISMIC_MODEL.get_travel_times(
source_depth_in_km=depth, distance_in_degree=i, phase_list=["p", "s"]
)
if len(arrivals) == 2:
deg.append(i)
p_time.append(arrivals[0].time)
s_time.append(arrivals[1].time)
model = WaveModel(np.array(deg), np.array(p_time), np.array(s_time))
wave_model_cache[depth] = model
return model
# pre fill wave model cache
for depth in range(10, 101, 10):
get_wave_model(depth)
class Intensity:
"""
Represents an intensity.
"""
__slots__ = ("_float_value", "_value", "_display")
def __init__(self, value: float) -> None:
"""
Initialize the intensity instance.
:param value: The intensity.
:type value: float
"""
self._float_value = value
self._value = round_intensity(value)
self._display = INTENSITY_DISPLAY[self._value]
@property
def value(self) -> int:
"""
The intensity.
"""
return self._value
@property
def display(self) -> str:
"""
Get the intensity display string.
"""
return self._display
def __str__(self) -> str:
return self._display
def __repr__(self) -> str:
return f"Intensity({self._float_value:.2f})"
class Distance:
"""
Represents a distance and travel time.
"""
__slots__ = ("_km", "_deg", "_p_arrival_time", "_s_arrival_time", "_p_travel_time", "_s_travel_time")
def __init__(
self,
in_km: float,
in_degrees: float,
p_arrival_time: datetime,
s_arrival_time: datetime,
p_travel_time: float,
s_travel_time: float,
) -> None:
"""
Initialize the distance instance.
:param in_km: The distance in kilometers.
:type in_km: float
:param in_degrees: The distance in degrees.
:type in_degrees: float
:param p_arrival_time: P wave arrival time.
:type p_arrival_time: datetime
:param s_arrival_time: S wave arrival time.
:type s_arrival_time: datetime
:param p_travel_time: P travel time.
:type p_travel_time: float
:param s_travel_time: S travel time.
:type s_travel_time: float
"""
self._km = in_km
self._deg = in_degrees
self._p_arrival_time = p_arrival_time
self._s_arrival_time = s_arrival_time
self._p_travel_time = p_travel_time
self._s_travel_time = s_travel_time
@property
def km(self) -> float:
"""
The distance from the hypocenter in km.
"""
return self._km
@property
def degrees(self) -> float:
"""
The distance from the epicenter in degrees.
"""
return self._deg
@property
def p_arrival_time(self) -> datetime:
"""
P wave arrival time.
"""
return self._p_arrival_time
@property
def s_arrival_time(self) -> datetime:
"""
S wave arrival time.
"""
return self._s_arrival_time
@property
def p_travel_time(self) -> float:
"""
P travel time.
"""
return self._p_travel_time
@property
def s_travel_time(self) -> float:
"""
S travel time.
"""
return self._s_travel_time
def p_left_time(self, now: datetime = MISSING) -> timedelta:
"""
P wave remaining time.
"""
return self._p_arrival_time - (now or datetime.now())
def s_left_time(self, now: datetime = MISSING) -> timedelta:
"""
S wave remaining time.
"""
return self._s_arrival_time - (now or datetime.now())
class RegionExpectedIntensity:
"""
Represents a region expected intensity.
"""
def __init__(self, region: RegionLocation, intensity: Intensity, distance: Distance) -> None:
"""
Initialize the region expected intensity instance.
:param region: The region.
:type region: RegionLocation
:param intensity: The intensity.
:type intensity: Intensity
:param distance: The distance.
:type distance: Distance
"""
self._region = region
self._intensity = intensity
self._distance = distance
@property
def region(self) -> RegionLocation:
"""
The region.
"""
return self._region
@property
def intensity(self) -> Intensity:
"""
The intensity.
"""
return self._intensity
@property
def distance(self) -> Distance:
"""
The distance.
"""
return self._distance
def __repr__(self) -> str:
return f"RegionExpectedIntensity({self._region}, {self._intensity}, {self._distance.s_arrival_time})"
class RegionExpectedIntensities(dict):
"""
    Represents a dict-like object of expected intensities per region, as returned by :func:`calculate_expected_intensity_and_travel_time`.
"""
def __init__(self, intensities: dict[int, RegionExpectedIntensity]):
"""
Initialize the region expected intensities instance.
:param intensities: The intensities.
:type intensities: dict[int, RegionExpectedIntensity]
"""
super(RegionExpectedIntensities, self).__init__()
self.update(intensities)
def __getitem__(self, key: int) -> RegionExpectedIntensity:
return super().__getitem__(key)
def get(self, key: int, default=None) -> RegionExpectedIntensity:
return super().get(key, default)
def _calculate_distance(p1: Location, p2: Location) -> float:
"""
Calculate the distance between two points on the Earth's surface.
    :param p1: The first location.
:type p1: Location
    :param p2: The second location.
:type p2: Location
:return: The distance between the two points in radians.
:rtype: float
"""
# haversine formula
lon1 = math.radians(p1.lon)
lat1 = math.radians(p1.lat)
lon2 = math.radians(p2.lon)
lat2 = math.radians(p2.lat)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
return c
def round_intensity(intensity: float) -> int:
"""
Round the floating-point intensity value to the nearest integer.
:param intensity: Floating-point intensity value.
:type intensity: float
:return: Rounded intensity value.
:rtype: int
"""
if intensity < 0:
return 0
elif intensity < 4.5:
return round(intensity)
elif intensity < 5:
return 5
elif intensity < 5.5:
return 6
elif intensity < 6:
return 7
elif intensity < 6.5:
return 8
else:
return 9
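# Worked examples for the mapping above (added for illustration): values below
# 4.5 round normally, while the upper bands follow half-step thresholds —
#   round_intensity(3.2) -> 3        (3級)
#   round_intensity(4.7) -> 5        (5弱)
#   round_intensity(5.2) -> 6        (5強)
#   round_intensity(6.6) -> 9        (7級)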
def _calculate_intensity(
hypocenter_distance: float,
magnitude: float,
depth: int,
site_effect: float = 1.751,
) -> float:
"""
    Calculate the intensity of the earthquake at a given distance.
:param hypocenter_distance: Actual distance from the hypocenter in kilometers.
:type hypocenter_distance: float
:param magnitude: Magnitude of the earthquake.
:type magnitude: float
:param depth: Depth of the earthquake in kilometers.
:type depth: int
:param site_effect: Site effect factor, default is 1.751.
:type site_effect: float
:return: Estimated intensity.
:rtype: float
"""
pga = 1.657 * math.exp(1.533 * magnitude) * hypocenter_distance**-1.607 * (site_effect or 1.751)
i = 2 * math.log10(pga) + 0.7
if i > 3:
long = 10 ** (0.5 * magnitude - 1.85) / 2
x = max(hypocenter_distance - long, 3)
gpv600 = 10 ** (
0.58 * magnitude
+ 0.0038 * depth
- 1.29
- math.log10(x + 0.0028 * 10 ** (0.5 * magnitude))
- 0.002 * x
)
arv = 1.0
pgv400 = gpv600 * 1.31
pgv = pgv400 * arv
i = 2.68 + 1.72 * math.log10(pgv)
return i
def calculate_expected_intensity_and_travel_time(
earthquake: "EarthquakeData", regions: list[RegionLocation] = MISSING
) -> RegionExpectedIntensities:
"""
Calculate the expected intensity and travel time of the earthquake in different regions.
:param earthquake: EarthquakeData object containing earthquake information.
:type earthquake: EarthquakeData
:param regions: List of RegionLocation to calculate. If missing, it will calculate all existing regions.
:type regions: list[RegionLocation]
:return: RegionExpectedIntensities object containing expected intensity and travel time for each region.
:rtype: RegionExpectedIntensities
"""
_expected_intensity = {}
squared_depth = earthquake.depth**2
for region in regions or REGIONS.values():
distance_in_radians = _calculate_distance(earthquake, region)
distance_in_degrees = math.degrees(distance_in_radians)
real_distance_in_km = math.sqrt((distance_in_radians * EARTH_RADIUS) ** 2 + squared_depth)
intensity = _calculate_intensity(
            real_distance_in_km, earthquake.mag, earthquake.depth, region.site_effect
)
p_travel, s_travel = earthquake.wave_model.get_travel_time(distance_in_radians)
_expected_intensity[region.code] = RegionExpectedIntensity(
region,
Intensity(intensity),
Distance(
real_distance_in_km,
distance_in_degrees,
earthquake.time + timedelta(seconds=p_travel),
earthquake.time + timedelta(seconds=s_travel),
p_travel,
s_travel,
),
)
intensities = RegionExpectedIntensities(_expected_intensity)
return intensities
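# Usage sketch (illustrative addition): the end-to-end intensity pipeline —
# compute intensities for every known region, then read one entry back. `eq` is
# any EarthquakeData instance; region code 100 is an invented example.
def _example_expected_intensity(eq: "EarthquakeData") -> None:
    intensities = calculate_expected_intensity_and_travel_time(eq)
    entry = intensities.get(100)
    if entry is not None:
        print(entry.intensity.display, entry.distance.s_arrival_time)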
| 13,360 | Python | .py | 387 | 27.142119 | 139 | 0.61634 | watermelon1024/EEW | 8 | 4 | 0 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,822 | main.py | Aspectise_Bundle-Sniper/main.py | import aiohttp
import asyncio
import os
from src import cprint
import json
from functions import checker, display, checker2
import traceback
import time
with open("settings.json", "r") as file:settings = json.load(file)
class Sniper:
def __init__(self) -> None:
self.cookie = settings["Cookie"]
self.user_id, self.username = asyncio.run(self.check_cookie())
self.webhook_url = settings["Webhook"]
seen = set()
self.bundles = list((key, value) for key, value in settings.get('Bundles', {}).items() if not (key in seen or seen.add(key)))
self.wait_checker = settings.get("Wait_Time").get("Checker_1")
self.wait_checker2 = settings.get("Wait_Time").get("Checker_2")
self.buys = 0
self.checks = 0
self.last_bought = None
self.buying = False
self.runtime = time.time()
asyncio.run(self.run())
async def check_cookie(self):
async with aiohttp.ClientSession(cookies={".ROBLOSECURITY": self.cookie}) as session:
async with session.get("https://users.roblox.com/v1/users/authenticated", ssl=False) as response:
if response.status == 200:
data = await response.json()
user_id = data.get("id")
name = data.get("name")
return user_id, name
else:
cprint.error("You have an invalid buy cookie in config.json")
os.system("pause")
os._exit(0)
async def webhook(self, data):
if self.webhook_url:
item_thumb = await self.get_thumb(data['id'])
payload = {"embeds": [{"title": f"New item purchased with Death Sniper", "description": f"**Successfully Purchased `{data['name']}`**\n**Price: `{data['name']}`**", "url": f"https://www.roblox.com/bundles/{id}", "color": 9109504, "footer": {"text": f"discord.gg/deathsniper","icon_url": "https://cdn-icons-png.flaticon.com/512/521/521269.png"}, "thumbnail": {"url": item_thumb}}]}
async with aiohttp.ClientSession() as session:
async with session.post(self.webhook_url, json=payload) as response:
if response.status != 204:
cprint.error(f"Failed to send webhook notification {response.status}.")
async def get_thumb(self, id):
async with aiohttp.ClientSession() as session:
async with session.get(f"https://thumbnails.roblox.com/v1/bundles/thumbnails?bundleIds={id}&size=420x420&format=Png&isCircular=false", ssl=False) as response:
if response.status == 200:
data = await response.json()
data = data.get("data")[0]
return data.get("imageUrl")
else:
return None
async def run(self):
while True:
try:
tasks = [asyncio.create_task(display.info(self)), asyncio.create_task(checker.start(self)), asyncio.create_task(checker2.start(self))]
await asyncio.gather(*tasks)
except Exception as e:
traceback.print_exc()
cprint.error(e)
if __name__ == "__main__":
Sniper() | 3,311 | Python | .py | 63 | 40.047619 | 393 | 0.581403 | Aspectise/Bundle-Sniper | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,823 | cprint.py | Aspectise_Bundle-Sniper/src/cprint.py | from rgbprint import Color
def info(text):
print(f"{Color(127, 127, 127)}INFO{Color(255, 255, 255)} | {text}")
def bought(text):
print(f"{Color(0, 255, 0)}BOUGHT{Color(255, 255, 255)} | {text}")
def error(text):
print(f"{Color(255, 0, 0)}ERROR{Color(255, 255, 255)} | {text}")
| 302 | Python | .py | 7 | 38.571429 | 72 | 0.622837 | Aspectise/Bundle-Sniper | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,824 | csrf.py | Aspectise_Bundle-Sniper/src/csrf.py | import requests
from rgbprint import Color
def get(cookie) -> str:
response = requests.post("https://economy.roblox.com/", cookies = {".ROBLOSECURITY": cookie})
xcsrf_token = response.headers.get("x-csrf-token")
if not xcsrf_token:
print(f"{Color(255, 0, 0)}ERROR{Color(255, 255, 255)} | An error occurred while getting the X-CSRF-TOKEN. Could be due to an invalid Roblox Cookie")
return None
return xcsrf_token | 453 | Python | .py | 9 | 44.777778 | 157 | 0.690745 | Aspectise/Bundle-Sniper | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,825 | checker.py | Aspectise_Bundle-Sniper/functions/checker.py | import aiohttp
import asyncio
import traceback
from src import cprint
from functions import purchase
async def start(self):
async with aiohttp.ClientSession(cookies={".ROBLOSECURITY": self.cookie}) as session:
while True:
try:
if not self.bundles:
return
bundle_ids = ','.join(id for id, price in self.bundles)
async with session.get(f"https://catalog.roblox.com/v1/bundles/details?bundleIds[]={bundle_ids}", ssl=False) as response:
self.checks += len(self.bundles)
if response.status == 200:
data = await response.json()
for items in data:
if items.get("collectibleItemDetail") and items.get("collectibleItemDetail").get("saleStatus") == "OnSale":
bundle_id = items.get("id")
bundle_price = items.get("collectibleItemDetail").get("price")
bundle_oprice = get_price(self, bundle_id)
                                if bundle_oprice is not None and bundle_price <= bundle_oprice and not self.buying:
cprint.info("Detected bundle!")
bundle_name = items.get("name")
bundle_pid = items.get("product").get("id")
bundle_cpid = items.get("collectibleItemDetail").get("collectibleProductId")
bundle_cid = items.get("collectibleItemDetail").get("collectibleItemId")
creator_id = items.get("creator").get("id")
creator_type = items.get("creator").get("type")
pdata = {
"name": bundle_name,
"id": bundle_id,
"pid": bundle_pid,
"cid": bundle_cid,
"cpid": bundle_cpid,
"price": bundle_price,
"creatorId": creator_id,
"creatorType": creator_type
}
if not self.buying:
cprint.info(f"Buying {bundle_name}...")
self.buying = True
await purchase.start(self, session, pdata)
elif response.status == 429:
cprint.error("Rate limit")
await asyncio.sleep(10)
except Exception as e:
traceback.print_exc()
cprint.error(e)
finally:
await asyncio.sleep(self.wait_checker)
def get_price(self, bundle_id):
for id, price in self.bundles:
if int(id) == int(bundle_id):
return price
return None | 3,181 | Python | .py | 56 | 31.160714 | 138 | 0.430735 | Aspectise/Bundle-Sniper | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,826 | get_product.py | Aspectise_Bundle-Sniper/functions/get_product.py | async def get(session, item):
async with session.post("https://apis.roblox.com/marketplace-items/v1/items/details", json={"itemIds": [item]}) as response:
if response.status == 200:
data = await response.json()
data = data[0]
return data.get("collectibleProductId")
else:
return None | 358 | Python | .py | 8 | 34.5 | 129 | 0.60114 | Aspectise/Bundle-Sniper | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,827 | display.py | Aspectise_Bundle-Sniper/functions/display.py | from rgbprint import Color, gradient_print
import datetime
import time
import asyncio
import os
async def info(self):
banner = '''
██▄ ▄███▄ ██ ▄▄▄▄▀ ▄ █
█ █ █▀ ▀ █ █ ▀▀▀ █ █ █
█ █ ██▄▄ █▄▄█ █ ██▀▀█
█ █ █▄ ▄▀ █ █ █ █ █
███▀ ▀███▀ █ ▀ █
█ ▀
▀
'''
while True:
if self.buying:
return
os.system('cls' if os.name == 'nt' else 'clear')
gradient_print(banner, start_color=Color(0x999999), end_color=Color(0xCCCCCC))
print(f'''
{Color(0xCCCCCC)} > Bundle Sniper <
{Color(0x999999)} > Usernames : {Color(0xCCCCCC)}{self.username}
{Color(0x999999)} > Bundles : {Color(0xCCCCCC)}{len(self.bundles)}
{Color(0x999999)} > Run Time : {Color(0xCCCCCC)}{str(datetime.timedelta(seconds=(round(time.time() - self.runtime, 0))))}
{Color(0x999999)} > Buys : {Color(0xCCCCCC)}{self.buys}
{Color(0x999999)} > Last Bought : {Color(0xCCCCCC)}{self.last_bought}
{Color(0x999999)} > Checks : {Color(0xCCCCCC)}{self.checks}
''')
await asyncio.sleep(2)
| 1,942 | Python | .py | 30 | 40.533333 | 166 | 0.331621 | Aspectise/Bundle-Sniper | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,828 | checker2.py | Aspectise_Bundle-Sniper/functions/checker2.py | import aiohttp
import asyncio
import traceback
from src import cprint
from functions import purchase, get_product
async def start(self):
async with aiohttp.ClientSession(cookies={".ROBLOSECURITY": self.cookie}) as session:
while True:
try:
if not self.bundles:
return
for id, price in self.bundles:
async with session.get(f"https://catalog.roblox.com/v1/catalog/items/{id}/details?itemType=Bundle", ssl=False) as response:
self.checks += 1
if response.status == 200:
data = await response.json()
if data.get("isPurchasable"):
bundle_id = data.get("id")
bundle_price = data.get("price")
if bundle_price <= price and not self.buying:
cprint.info("Detected bundle!")
bundle_name = data.get("name")
bundle_pid = data.get("productId")
bundle_cid = data.get("collectibleItemId")
bundle_cpid = await get_product.get(session, bundle_cid)
creator_id = data.get("expectedSellerId")
creator_type = data.get("creatorType")
pdata = {
"name": bundle_name,
"id": bundle_id,
"pid": bundle_pid,
"cid": bundle_cid,
"cpid": bundle_cpid,
"price": bundle_price,
"creatorId": creator_id,
"creatorType": creator_type
}
if not self.buying:
self.buying = True
cprint.info(f"Buying {bundle_name}...")
await purchase.start(self, session, pdata)
elif response.status == 429:
await asyncio.sleep(10)
except Exception as e:
traceback.print_exc()
cprint.error(e)
finally:
await asyncio.sleep(self.wait_checker2)
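# Note (added for clarity): this checker polls the per-item details endpoint
# (catalog/items/{id}/details) one bundle at a time, while functions/checker.py
# batches every id through bundles/details in a single request; running both
# trades extra requests for a better chance of catching a restock first.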
| 2,651 | Python | .py | 48 | 27.770833 | 144 | 0.396002 | Aspectise/Bundle-Sniper | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,829 | purchase.py | Aspectise_Bundle-Sniper/functions/purchase.py | from src import cprint, csrf
import uuid
import traceback
import asyncio
async def start(self, session, pdata):
try:
xcsrf = csrf.get(self.cookie)
session.headers.update({"X-Csrf-Token": xcsrf})
payload = {
"collectibleItemId": pdata["cid"],
"expectedCurrency": 1,
"expectedPrice": pdata["price"],
"expectedPurchaserId": self.user_id,
"expectedPurchaserType": "User",
"expectedSellerId": pdata["creatorId"],
"expectedSellerType": pdata["creatorType"],
"idempotencyKey": str(uuid.uuid4()),
"collectibleProductId": pdata["cpid"]
}
async with session.post(f"https://apis.roblox.com/marketplace-sales/v1/item/{pdata['cid']}/purchase-item", json=payload, ssl=False) as response:
if response.status == 200:
data = await response.json()
if data.get("purchased"):
remove_id(self, pdata["id"])
cprint.bought(f"Successfully bought {pdata['name']} for {pdata['price']}!")
self.buys += 1
self.last_bought = pdata["name"]
                    await self.webhook(pdata)
else:
cprint.error(f"Failed to buy bundle: {data}")
else:
data = await response.text()
if "AlreadyOwned" in data:
cprint.error(f"{pdata['name']} already owned.")
remove_id(self, pdata["id"])
return
else:
cprint.error(f"Failed to buy bundle: {data}")
except Exception as e:
cprint.error(e)
finally:
self.buying = False
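# Note (added for clarity): the payload above mirrors what the marketplace-sales
# purchase endpoint expects. `idempotencyKey` is a fresh UUID per attempt, which
# by the usual meaning of the field should keep a retried request from being
# charged twice (an inference from the name, not confirmed against the API docs).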
def remove_id(self, id):
index = next((index for index, bundle in enumerate(self.bundles) if int(bundle[0]) == id), None)
if index is not None:
del self.bundles[index] | 1,940 | Python | .py | 46 | 29.76087 | 153 | 0.546321 | Aspectise/Bundle-Sniper | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,830 | conf.py | liebharc_homr/docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("../"))
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = ""
copyright = ""
author = ""
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.doctest",
"sphinx.ext.autosectionlabel",
"sphinx.ext.githubpages",
]
source_suffix = [".rst", ".md"]
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = False
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
napoleon_preprocess_types = False
napoleon_type_aliases = None
napoleon_attr_annotations = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "furo"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
| 2,555 | Python | .py | 58 | 42.448276 | 83 | 0.704674 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,831 | test_bounding_boxes.py | liebharc_homr/tests/test_bounding_boxes.py | import unittest
import numpy as np
from homr.bounding_boxes import BoundingEllipse, RotatedBoundingBox
empty = np.array([])
class TestBoundingBoxes(unittest.TestCase):
unit_size = 3
def test_is_overlapping_rotated_box_extrapolated(self) -> None:
box1 = RotatedBoundingBox(((100, 200), (10, 10), 0), empty)
touching_box = RotatedBoundingBox(((110, 200), (10, 10), 0), empty)
        same_line_box = RotatedBoundingBox(((140, 200), (10, 10), 0), empty)
different_line_box = RotatedBoundingBox(((220, 300), (10, 10), 0), empty)
self.assertTrue(box1.is_overlapping_extrapolated(touching_box, self.unit_size))
        self.assertTrue(box1.is_overlapping_extrapolated(same_line_box, self.unit_size))
self.assertFalse(box1.is_overlapping_extrapolated(different_line_box, self.unit_size))
box2 = RotatedBoundingBox(((100, 200), (10, 10), 45), empty)
touching_box = RotatedBoundingBox(((110, 200), (10, 20), 50), empty)
        same_line_box = RotatedBoundingBox(((110, 200), (10, 10), 45), empty)
different_line_box = RotatedBoundingBox(((140, 220), (10, 10), 0), empty)
self.assertTrue(box2.is_overlapping_extrapolated(touching_box, self.unit_size))
        self.assertFalse(box2.is_overlapping_extrapolated(same_line_box, self.unit_size))
self.assertTrue(box2.is_overlapping_extrapolated(different_line_box, self.unit_size))
def test_is_overlapping_rotated_box_with_rotated_box(self) -> None:
box1 = RotatedBoundingBox(((100, 200), (10, 10), 0), empty)
touching_box = RotatedBoundingBox(((110, 200), (10, 10), 0), empty)
inside_box = RotatedBoundingBox(((105, 200), (10, 10), 0), empty)
far_away_box = RotatedBoundingBox(((200, 200), (10, 10), 0), empty)
crossing_box = RotatedBoundingBox(((105, 205), (10, 10), 90), empty)
self.assertTrue(box1.is_overlapping(touching_box))
self.assertTrue(box1.is_overlapping(inside_box))
self.assertFalse(box1.is_overlapping(far_away_box))
self.assertTrue(box1.is_overlapping(crossing_box))
def test_is_overlapping_rotated_box_with_ellipse(self) -> None:
box1 = RotatedBoundingBox(((100, 200), (10, 10), 0), empty)
touching_box = BoundingEllipse(((110, 200), (10, 10), 0), empty)
inside_box = BoundingEllipse(((105, 200), (10, 10), 0), empty)
far_away_box = BoundingEllipse(((200, 200), (10, 10), 0), empty)
self.assertTrue(box1.is_overlapping(touching_box))
self.assertTrue(box1.is_overlapping(inside_box))
self.assertFalse(box1.is_overlapping(far_away_box))
box2 = RotatedBoundingBox(
((570.1167602539062, 506.98968505859375), (2, 60), -5.042449951171875), empty
)
ellipse2 = BoundingEllipse(((536.93896484375, 470.5845947265625), (13, 17), 5), empty)
self.assertFalse(box2.is_overlapping(ellipse2))
| 2,974 | Python | .py | 44 | 58.181818 | 95 | 0.660813 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,832 | test_poetry_config.py | liebharc_homr/tests/test_poetry_config.py | import pathlib
import re
from platform import python_version
CURRENT_MINOR_VERSION = ".".join(python_version().split(".")[:2])
TEST_FILE_PATH = pathlib.Path(__file__).parent.resolve()
PYPROJECT_TOML_PATH = list(TEST_FILE_PATH.glob("../pyproject.toml"))
MAKEFILE_PATH = list(TEST_FILE_PATH.glob("../Makefile"))
PRECOMMIT_HOOKS_PATH = list(TEST_FILE_PATH.glob("../.git/hooks"))
PYPROJECT_TOML_VERSION_REGEX = r"\n((?:py(?:thon)?)(?:[_-]version)?)\s=\D+(\d+.\d+)"
def test_file_uniqueness() -> None:
# File uniqueness
    if len(PYPROJECT_TOML_PATH) != 1:
        raise ValueError(
            "Expected exactly one 'pyproject.toml', found:"
            f" {', '.join(str(p) for p in PYPROJECT_TOML_PATH)}"
        )
    if len(MAKEFILE_PATH) != 1:
        raise ValueError(
            f"Expected exactly one 'Makefile', found: {', '.join(str(p) for p in MAKEFILE_PATH)}"
        )
def test_consistent_versioning() -> None:
with open(PYPROJECT_TOML_PATH[0], encoding="utf-8") as f:
pyproject_toml = f.read()
# TOML configurations
toml_versions = re.findall(PYPROJECT_TOML_VERSION_REGEX, pyproject_toml)
for var_name, var_version in toml_versions:
if var_version != CURRENT_MINOR_VERSION:
raise ValueError(
f'"{var_name}" on file pyproject.toml is not set to {CURRENT_MINOR_VERSION}'
)
def test_isset_precommit_hooks() -> None:
if len(PRECOMMIT_HOOKS_PATH) == 0:
raise ValueError("Pre-commit hooks are not set, run `make pre-commit` in `bash`")
| 1,527 | Python | .py | 33 | 39.969697 | 92 | 0.642616 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,833 | test_split_merge_symbols_primus.py | liebharc_homr/tests/test_split_merge_symbols_primus.py | import unittest
from homr.transformer.split_merge_symbols import merge_symbols, split_symbols
def split_merge(merged: str) -> list[str]:
actuallift, actualpitch, actualrhythm, _actualnotes = split_symbols([merged.replace("+", "\t")])
merged_again = merge_symbols(actualrhythm, actualpitch, actuallift)
return merged_again
class TestMergeSymbolsPrimus(unittest.TestCase):
def test_merge(self) -> None:
actual = split_merge(
"clef-C1 timeSignature-C/ note-G4_double_whole note-G4_whole note-A4_whole. note-G4_half note-G4_half note-F#4_half note-G4_double_whole note-G4_half" # noqa: E501
)
self.assertEqual(
actual,
[
"clef-C1+timeSignature-C/+note-G4_breve+note-G4_whole+note-A4_whole.+note-G4_half+note-G4_half+note-F4#_half+note-G4_breve+note-G4_half"
],
)
| 878 | Python | .py | 17 | 43.705882 | 176 | 0.678363 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,834 | test_tr_omr_parser.py | liebharc_homr/tests/test_tr_omr_parser.py | import unittest
from homr import constants
from homr.results import (
ClefType,
DurationModifier,
ResultChord,
ResultClef,
ResultDuration,
ResultMeasure,
ResultNote,
ResultPitch,
ResultStaff,
ResultTimeSignature,
)
from homr.tr_omr_parser import TrOMRParser
def single_note(pitch: ResultPitch, duration: ResultDuration) -> ResultChord:
return ResultChord(
duration,
[
ResultNote(
pitch,
duration,
)
],
)
def note_chord(notes: list[ResultNote]) -> ResultChord:
return ResultChord(notes[0].duration, notes)
class TestTrOmrParser(unittest.TestCase):
unit_size = 3
def test_parsing(self) -> None:
data = "clef-G2+keySignature-FM+timeSignature-/4+note-A4_half+note-B4_half+barline+note-A4_quarter.+note-G4_eighth+note-F4_quarter+note-G4_quarter+barline" # noqa: E501
expected = ResultStaff(
[
ResultMeasure(
[
ResultClef(ClefType.treble(), -1),
ResultTimeSignature(1, 4),
single_note(
ResultPitch("A", 4, None),
ResultDuration(2 * constants.duration_of_quarter),
),
single_note(
ResultPitch("B", 4, None),
ResultDuration(2 * constants.duration_of_quarter),
),
]
),
ResultMeasure(
[
single_note(
ResultPitch("A", 4, None),
ResultDuration(
int(constants.duration_of_quarter), DurationModifier.DOT
),
),
single_note(
ResultPitch("G", 4, None),
ResultDuration(constants.duration_of_quarter // 2),
),
single_note(
ResultPitch("F", 4, None),
ResultDuration(constants.duration_of_quarter),
),
single_note(
ResultPitch("G", 4, None),
ResultDuration(constants.duration_of_quarter),
),
]
),
]
)
parser = TrOMRParser()
actual = parser.parse_tr_omr_output(data)
self.assertEqual(actual, expected)
def test_parsing_no_final_bar_line(self) -> None:
data = "clef-G2+keySignature-FM+timeSignature-/4+note-A4_half+note-B4_half+barline+note-A4_quarter.+note-G4_eighth+note-F4_quarter+note-G4_quarter" # noqa: E501
expected = ResultStaff(
[
ResultMeasure(
[
ResultClef(ClefType.treble(), -1),
ResultTimeSignature(1, 4),
single_note(
ResultPitch("A", 4, None),
ResultDuration(2 * constants.duration_of_quarter),
),
single_note(
ResultPitch("B", 4, None),
ResultDuration(2 * constants.duration_of_quarter),
),
]
),
ResultMeasure(
[
single_note(
ResultPitch("A", 4, None),
ResultDuration(constants.duration_of_quarter, DurationModifier.DOT),
),
single_note(
ResultPitch("G", 4, None),
ResultDuration(constants.duration_of_quarter // 2),
),
single_note(
ResultPitch("F", 4, None),
ResultDuration(constants.duration_of_quarter),
),
single_note(
ResultPitch("G", 4, None),
ResultDuration(constants.duration_of_quarter),
),
]
),
]
)
parser = TrOMRParser()
actual = parser.parse_tr_omr_output(data)
self.assertEqual(actual, expected)
def test_rest_parsing(self) -> None:
data = "note-E5_sixteenth|rest-eighth+note-A2_eighth|note-E4_eighth+rest-eighth"
expected = ResultStaff(
[
ResultMeasure(
[
single_note(
ResultPitch("E", 5, None),
ResultDuration(constants.duration_of_quarter // 4),
),
note_chord(
[
ResultNote(
ResultPitch("A", 2, None),
ResultDuration(constants.duration_of_quarter // 2),
),
ResultNote(
ResultPitch("E", 4, None),
ResultDuration(constants.duration_of_quarter // 2),
),
]
),
ResultChord(ResultDuration(constants.duration_of_quarter // 2), []),
]
),
]
)
parser = TrOMRParser()
actual = parser.parse_tr_omr_output(data)
self.assertEqual(actual, expected)
def test_note_group_parsing(self) -> None:
data = "clef-G2+keySignature-CM+note-D4_quarter+note-E4_quarter+note-F4_quarter+note-G4_quarter+barline+note-D4_half+note-D4_half|note-G4_half+barline+note-E4_quarter+note-F4_quarter+note-G4_quarter+note-A4_quarter+barline+note-E4_half+note-E4_half|note-A4_half+barline+note-F4_quarter+note-G4_quarter+note-A4_quarter+note-B4_quarter+barline+note-F4_half+note-F4_half|note-B4_half+barline" # noqa: E501
expected = ResultStaff(
[
ResultMeasure(
[
ResultClef(ClefType.treble(), 0),
single_note(
ResultPitch("D", 4, None),
ResultDuration(constants.duration_of_quarter),
),
single_note(
ResultPitch("E", 4, None),
ResultDuration(constants.duration_of_quarter),
),
single_note(
ResultPitch("F", 4, None),
ResultDuration(constants.duration_of_quarter),
),
single_note(
ResultPitch("G", 4, None),
ResultDuration(constants.duration_of_quarter),
),
]
),
ResultMeasure(
[
single_note(
ResultPitch("D", 4, None),
ResultDuration(
constants.duration_of_quarter * 2,
),
),
note_chord(
[
ResultNote(
ResultPitch("D", 4, None),
ResultDuration(constants.duration_of_quarter * 2),
),
ResultNote(
ResultPitch("G", 4, None),
ResultDuration(constants.duration_of_quarter * 2),
),
]
),
]
),
ResultMeasure(
[
single_note(
ResultPitch("E", 4, None),
ResultDuration(constants.duration_of_quarter),
),
single_note(
ResultPitch("F", 4, None),
ResultDuration(constants.duration_of_quarter),
),
single_note(
ResultPitch("G", 4, None),
ResultDuration(constants.duration_of_quarter),
),
single_note(
ResultPitch("A", 4, None),
ResultDuration(constants.duration_of_quarter),
),
]
),
ResultMeasure(
[
single_note(
ResultPitch("E", 4, None),
ResultDuration(constants.duration_of_quarter * 2),
),
note_chord(
[
ResultNote(
ResultPitch("E", 4, None),
ResultDuration(constants.duration_of_quarter * 2),
),
ResultNote(
ResultPitch("A", 4, None),
ResultDuration(constants.duration_of_quarter * 2),
),
]
),
]
),
ResultMeasure(
[
single_note(
ResultPitch("F", 4, None),
ResultDuration(constants.duration_of_quarter),
),
single_note(
ResultPitch("G", 4, None),
ResultDuration(constants.duration_of_quarter),
),
single_note(
ResultPitch("A", 4, None),
ResultDuration(constants.duration_of_quarter),
),
single_note(
ResultPitch("B", 4, None),
ResultDuration(constants.duration_of_quarter),
),
]
),
ResultMeasure(
[
single_note(
ResultPitch("F", 4, None),
ResultDuration(constants.duration_of_quarter * 2),
),
note_chord(
[
ResultNote(
ResultPitch("F", 4, None),
ResultDuration(constants.duration_of_quarter * 2),
),
ResultNote(
ResultPitch("B", 4, None),
ResultDuration(constants.duration_of_quarter * 2),
),
]
),
]
),
]
)
parser = TrOMRParser()
actual = parser.parse_tr_omr_output(data)
self.assertEqual(actual, expected)
def test_accidental_parsing(self) -> None:
data = "clef-G2+keySignature-DM+note-D4_quarter+note-E4_quarter+note-F4_quarter+note-G4_quarter+note-C5_quarter+note-C5#_quarter+note-A4#_quarter+barline" # noqa: E501
expected = ResultStaff(
[
ResultMeasure(
[
ResultClef(ClefType.treble(), 2),
single_note(
ResultPitch("D", 4, None),
ResultDuration(constants.duration_of_quarter),
),
single_note(
ResultPitch("E", 4, None),
ResultDuration(constants.duration_of_quarter),
),
single_note(
ResultPitch("F", 4, None),
ResultDuration(constants.duration_of_quarter),
),
single_note(
ResultPitch("G", 4, None),
ResultDuration(constants.duration_of_quarter),
),
single_note(
ResultPitch("C", 5, None),
ResultDuration(constants.duration_of_quarter),
),
single_note(
ResultPitch("C", 5, 1),
ResultDuration(constants.duration_of_quarter),
),
single_note(
ResultPitch("A", 4, 1),
ResultDuration(constants.duration_of_quarter),
),
]
),
]
)
parser = TrOMRParser()
actual = parser.parse_tr_omr_output(data)
self.assertEqual(actual, expected)
def test_parsing_chords_with_rests(self) -> None:
data = "rest_quarter|note-A4_half|note-B4_half"
expected = ResultStaff(
[
ResultMeasure(
[
note_chord(
[
ResultNote(
ResultPitch("A", 4, None),
ResultDuration(2 * constants.duration_of_quarter),
),
ResultNote(
ResultPitch("B", 4, None),
ResultDuration(2 * constants.duration_of_quarter),
),
]
)
]
)
]
)
parser = TrOMRParser()
actual = parser.parse_tr_omr_output(data)
self.assertEqual(actual, expected)
def test_parse_chords_with_unexpected_symbols(self) -> None:
data = "note-A4_half|barline"
expected = ResultStaff(
[
ResultMeasure(
[
single_note(
ResultPitch("A", 4, None),
ResultDuration(2 * constants.duration_of_quarter),
),
]
)
]
)
parser = TrOMRParser()
actual = parser.parse_tr_omr_output(data)
self.assertEqual(actual, expected)
| 15,772 | Python | .py | 363 | 20.429752 | 412 | 0.373723 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,835 | test_music_xml_parser.py | liebharc_homr/tests/test_music_xml_parser.py | import unittest
from training.music_xml import music_xml_string_to_semantic
class TestMusicXmlParser(unittest.TestCase):
def test_parse_xml_with_backup(self) -> None:
self.maxDiff = None
# lc4987672.musicxml, measure 22
example = """<?xml version="1.0" encoding="UTF-8"?>
<score-partwise version="4.0">
<part id="P1">
<measure number="22">
<attributes>
<divisions>4</divisions>
<key>
<fifths>1</fifths>
</key>
<time symbol="common">
<beats>4</beats>
<beat-type>4</beat-type>
</time>
<staves>2</staves>
<clef number="1">
<sign>G</sign>
<line>2</line>
</clef>
<clef number="2">
<sign>F</sign>
<line>4</line>
</clef>
</attributes>
<note>
<pitch>
<step>A</step>
<alter>1</alter>
<octave>3</octave>
</pitch>
<duration>16</duration>
<tie type="stop"/>
<voice>1</voice>
<type>whole</type>
<staff>1</staff>
<notations>
<tied type="stop"/>
</notations>
</note>
<note>
<chord/>
<pitch>
<step>G</step>
<octave>4</octave>
</pitch>
<duration>16</duration>
<tie type="stop"/>
<tie type="start"/>
<voice>1</voice>
<type>whole</type>
<staff>1</staff>
<notations>
<tied type="stop"/>
<tied type="start"/>
</notations>
</note>
<backup>
<duration>16</duration>
</backup>
<note>
<rest/>
<duration>8</duration>
<voice>2</voice>
<type>half</type>
<staff>1</staff>
</note>
<direction placement="below">
<direction-type>
<dynamics default-x="2.78" default-y="-40.00" relative-x="3.29" relative-y="-50.00">
<pp/>
</dynamics>
</direction-type>
<staff>1</staff>
<sound dynamics="36.67"/>
</direction>
<note>
<pitch>
<step>E</step>
<octave>4</octave>
</pitch>
<duration>8</duration>
<tie type="start"/>
<voice>2</voice>
<type>half</type>
<stem>down</stem>
<staff>1</staff>
<notations>
<tied type="start"/>
</notations>
</note>
<backup>
<duration>16</duration>
</backup>
<note>
<pitch>
<step>G</step>
<octave>3</octave>
</pitch>
<duration>4</duration>
<voice>5</voice>
<type>quarter</type>
<stem>down</stem>
<staff>2</staff>
<notations>
<slur type="stop" number="1"/>
</notations>
</note>
<note default-x="55.02" default-y="-298.07">
<rest/>
<duration>4</duration>
<voice>5</voice>
<type>quarter</type>
<staff>2</staff>
</note>
<note default-x="139.48" default-y="-338.07">
<pitch>
<step>C</step>
<octave>2</octave>
</pitch>
<duration>8</duration>
<voice>5</voice>
<type>half</type>
<stem>up</stem>
<staff>2</staff>
</note>
</measure>
</part>
</score-partwise>
"""
semantic = music_xml_string_to_semantic(example)
self.assertEqual(
semantic,
[
[
"clef-G2",
"keySignature-GM",
"timeSignature-4/4",
"note-A3#_whole|note-G4_whole|rest-half",
"note-E4_half",
"barline",
],
[
"clef-F4",
"keySignature-GM",
"timeSignature-4/4",
"note-G3_quarter",
"rest-quarter",
"note-C2_half",
"barline",
],
],
)
def test_parse_xml_with_courtesy_accidental(self) -> None:
self.maxDiff = None
# lc6202270.musicxml, measure 42
example = """<?xml version="1.0" encoding="UTF-8"?>
<score-partwise version="4.0">
<part id="P1">
<measure number="42">
<attributes>
<divisions>4</divisions>
<key>
<fifths>1</fifths>
</key>
<time>
<beats>2</beats>
<beat-type>4</beat-type>
</time>
<staves>2</staves>
<clef number="1">
<sign>G</sign>
<line>2</line>
</clef>
<clef number="2">
<sign>F</sign>
<line>4</line>
</clef>
</attributes>
<note default-x="27.18" default-y="-153.01">
<pitch>
<step>G</step>
<octave>4</octave>
</pitch>
<duration>4</duration>
<tie type="stop"/>
<voice>1</voice>
<type>quarter</type>
<stem>up</stem>
<staff>1</staff>
<notations>
<tied type="stop"/>
</notations>
</note>
<direction placement="below">
<direction-type>
<wedge type="stop" number="1" relative-x="-10.00"/>
</direction-type>
<staff>1</staff>
</direction>
<note default-x="180.53" default-y="-158.01">
<pitch>
<step>F</step>
<alter>1</alter>
<octave>4</octave>
</pitch>
<duration>4</duration>
<voice>1</voice>
<type>quarter</type>
<stem>up</stem>
<staff>1</staff>
<notations>
<slur type="stop" number="1"/>
</notations>
</note>
<backup>
<duration>8</duration>
</backup>
<note default-x="27.18" default-y="-173.01">
<pitch>
<step>C</step>
<alter>1</alter>
<octave>4</octave>
</pitch>
<duration>2</duration>
<voice>2</voice>
<type>eighth</type>
<accidental>sharp</accidental>
<stem>down</stem>
<staff>1</staff>
<beam number="1">begin</beam>
</note>
<note default-x="88.52" default-y="-168.01">
<pitch>
<step>D</step>
<octave>4</octave>
</pitch>
<duration>2</duration>
<voice>2</voice>
<type>eighth</type>
<stem>down</stem>
<staff>1</staff>
<beam number="1">end</beam>
</note>
<note default-x="180.53" default-y="-183.01">
<pitch>
<step>A</step>
<alter>1</alter>
<octave>3</octave>
</pitch>
<duration>2</duration>
<voice>2</voice>
<type>eighth</type>
<accidental>sharp</accidental>
<stem>down</stem>
<staff>1</staff>
<beam number="1">begin</beam>
</note>
<note default-x="241.87" default-y="-178.01">
<pitch>
<step>B</step>
<octave>3</octave>
</pitch>
<duration>2</duration>
<voice>2</voice>
<type>eighth</type>
<stem>down</stem>
<staff>1</staff>
<beam number="1">end</beam>
</note>
<backup>
<duration>8</duration>
</backup>
<note default-x="26.82" default-y="-261.46">
<pitch>
<step>E</step>
<octave>3</octave>
</pitch>
<duration>8</duration>
<tie type="start"/>
<voice>5</voice>
<type>half</type>
<stem>up</stem>
<staff>2</staff>
<notations>
<tied type="start"/>
</notations>
</note>
<backup>
<duration>8</duration>
</backup>
<note default-x="27.18" default-y="-281.46">
<pitch>
<step>A</step>
<alter>1</alter>
<octave>2</octave>
</pitch>
<duration>2</duration>
<voice>6</voice>
<type>eighth</type>
<accidental>sharp</accidental>
<stem>down</stem>
<staff>2</staff>
<beam number="1">begin</beam>
</note>
<note default-x="88.52" default-y="-276.46">
<pitch>
<step>B</step>
<octave>2</octave>
</pitch>
<duration>2</duration>
<voice>6</voice>
<type>eighth</type>
<stem>down</stem>
<staff>2</staff>
<beam number="1">end</beam>
</note>
<note default-x="180.53" default-y="-271.46">
<pitch>
<step>C</step>
<octave>3</octave>
</pitch>
<duration>2</duration>
<voice>6</voice>
<type>eighth</type>
<accidental>natural</accidental>
<stem>down</stem>
<staff>2</staff>
<beam number="1">begin</beam>
</note>
<note default-x="241.87" default-y="-276.46">
<pitch>
<step>B</step>
<octave>2</octave>
</pitch>
<duration>2</duration>
<voice>6</voice>
<type>eighth</type>
<stem>down</stem>
<staff>2</staff>
<beam number="1">end</beam>
</note>
</measure>
</part>
</score-partwise>
"""
semantic = music_xml_string_to_semantic(example)
self.assertEqual(
semantic,
[
[
"clef-G2",
"keySignature-GM",
"timeSignature-2/4",
"note-G4_quarter|note-C4#_eighth",
"note-D4_eighth",
"note-F4_quarter|note-A3#_eighth",
"note-B3_eighth",
"barline",
],
[
"clef-F4",
"keySignature-GM",
"timeSignature-2/4",
"note-E3_half|note-A2#_eighth",
"note-B2_eighth",
"note-C3N_eighth",
"note-B2_eighth",
"barline",
],
],
)
| 10,582 | Python | .py | 376 | 16.797872 | 95 | 0.43625 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,836 | test_result_model.py | liebharc_homr/tests/test_result_model.py | import unittest
from homr.results import ClefType, ResultClef, ResultPitch, move_pitch_to_clef
class TestResultModel(unittest.TestCase):
def test_change_staff(self) -> None:
treble = ResultClef(ClefType.treble(), 1)
bass = ResultClef(ClefType.bass(), 1)
self.assertEqual(
str(move_pitch_to_clef(treble.get_reference_pitch(), treble, bass)),
str(bass.get_reference_pitch()),
)
self.assertEqual(
str(move_pitch_to_clef(bass.get_reference_pitch(), bass, treble)),
str(treble.get_reference_pitch()),
)
self.assertEqual(str(move_pitch_to_clef(ResultPitch("E", 2, 1), bass, treble)), "C4#")
self.assertEqual(str(move_pitch_to_clef(ResultPitch("F", 2, 0), bass, treble)), "D4♮")
self.assertEqual(str(move_pitch_to_clef(ResultPitch("G", 2, -1), bass, treble)), "E4b")
self.assertEqual(str(move_pitch_to_clef(ResultPitch("A", 2, None), bass, treble)), "F4")
self.assertEqual(str(move_pitch_to_clef(ResultPitch("B", 2, 1), bass, treble)), "G4#")
self.assertEqual(str(move_pitch_to_clef(ResultPitch("C", 3, 0), bass, treble)), "A4♮")
self.assertEqual(str(move_pitch_to_clef(ResultPitch("D", 3, -1), bass, treble)), "B4b")
self.assertEqual(str(move_pitch_to_clef(ResultPitch("E", 3, None), bass, treble)), "C5")
self.assertEqual(str(move_pitch_to_clef(ResultPitch("F", 3, 1), bass, treble)), "D5#")
self.assertEqual(str(move_pitch_to_clef(ResultPitch("G", 3, 0), bass, treble)), "E5♮")
self.assertEqual(str(move_pitch_to_clef(ResultPitch("A", 3, -1), bass, treble)), "F5b")
self.assertEqual(str(move_pitch_to_clef(ResultPitch("B", 3, None), bass, treble)), "G5")
self.assertEqual(str(move_pitch_to_clef(ResultPitch("C", 4, 1), bass, treble)), "A5#")
def test_move_pitch(self) -> None:
note_c4 = ResultPitch("C", 4, None)
self.assertEqual(note_c4.move_by(1, None), ResultPitch("D", 4, None))
self.assertEqual(note_c4.move_by(2, None), ResultPitch("E", 4, None))
self.assertEqual(note_c4.move_by(3, None), ResultPitch("F", 4, None))
self.assertEqual(note_c4.move_by(4, None), ResultPitch("G", 4, None))
self.assertEqual(note_c4.move_by(5, None), ResultPitch("A", 4, None))
self.assertEqual(note_c4.move_by(6, None), ResultPitch("B", 4, None))
self.assertEqual(note_c4.move_by(7, None), ResultPitch("C", 5, None))
self.assertEqual(note_c4.move_by(8, None), ResultPitch("D", 5, None))
note_d4 = ResultPitch("D", 4, None)
self.assertEqual(note_d4.move_by(0, None), ResultPitch("D", 4, None))
self.assertEqual(note_d4.move_by(1, None), ResultPitch("E", 4, None))
self.assertEqual(note_d4.move_by(2, None), ResultPitch("F", 4, None))
self.assertEqual(note_d4.move_by(3, None), ResultPitch("G", 4, None))
self.assertEqual(note_d4.move_by(4, None), ResultPitch("A", 4, None))
self.assertEqual(note_d4.move_by(5, None), ResultPitch("B", 4, None))
self.assertEqual(note_d4.move_by(6, None), ResultPitch("C", 5, None))
self.assertEqual(note_d4.move_by(7, None), ResultPitch("D", 5, None))
| 3,289 | Python | .py | 46 | 61.282609 | 97 | 0.625427 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,837 | test_split_merge_symbols.py | liebharc_homr/tests/test_split_merge_symbols.py | import unittest
from homr.transformer.split_merge_symbols import (
convert_alter_to_accidentals,
merge_symbols,
split_symbols,
)
predlift = [
[
"nonote",
"nonote",
"nonote",
"lift_N",
"nonote",
"lift_N",
"nonote",
"lift_N",
"nonote",
"lift_null",
"nonote",
"lift_null",
"lift_N",
"lift_N",
"nonote",
"lift_N",
"nonote",
"lift_N",
"nonote",
"lift_#",
]
]
predpitch = [
[
"nonote",
"nonote",
"nonote",
"note-C4",
"nonote",
"note-F4",
"nonote",
"note-G4",
"nonote",
"note-B4",
"nonote",
"note-B4",
"note-C5",
"note-D5",
"nonote",
"note-C5",
"nonote",
"note-G4",
"nonote",
"note-E4",
]
]
predrhythm = [
[
"clef-G2",
"keySignature-EM",
"timeSignature-/8",
"note-half.",
"barline",
"note-half.",
"barline",
"note-half.",
"barline",
"note-half.",
"barline",
"note-half",
"note-eighth",
"note-eighth",
"barline",
"note-eighth",
"|",
"note-eighth",
"|",
"note-eighth",
]
]
prednotes = [
[
"nonote",
"nonote",
"nonote",
"note",
"nonote",
"note",
"nonote",
"note",
"nonote",
"note",
"nonote",
"note",
"note",
"note",
"nonote",
"note",
"nonote",
"note",
"nonote",
"note",
]
]
merged = [
"clef-G2+keySignature-EM+timeSignature-6/8+note-C4_half.+barline+note-F4_half.+barline+note-G4_half.+barline+note-B4_half.+barline+note-B4_half+note-C5_eighth+note-D5_eighth+barline+note-C5_eighth|note-G4_eighth|note-E4#_eighth"
]
class TestMergeSymbols(unittest.TestCase):
def test_merge(self) -> None:
        actual = merge_symbols(predrhythm, predpitch, predlift)
expected = convert_alter_to_accidentals(merged)
expected = [expected[0].replace("timeSignature-6/8", "timeSignature-/8")]
self.assertEqual(actual, expected)
def test_split(self) -> None:
# Replace the + with \t as this is what the input provides
actuallift, actualpitch, actualrhythm, actualnotes = split_symbols(
[merged[0].replace("+", "\t")]
)
        self.assertEqual(actualrhythm, predrhythm)
self.assertEqual(actuallift, predlift)
self.assertEqual(actualpitch, predpitch)
self.assertEqual(actualnotes, prednotes)
def test_split_sorts_notes(self) -> None:
# Replace the + with \t as this is what the input provides
_actuallift, actualpitch, _actualrhythm, _actualnotes = split_symbols(
[
"note-E4#_eighth|note-G4_eighth|note-C5_eighth\tnote-C5_eighth|note-E4#_eighth|note-G4_eighth"
]
)
self.assertEqual(
actualpitch,
[
[
"note-C5",
"nonote",
"note-G4",
"nonote",
"note-E4",
"note-C5",
"nonote",
"note-G4",
"nonote",
"note-E4",
]
],
)
def test_split_sorts_notes_and_rests(self) -> None:
self.maxDiff = None
# Replace the + with \t as this is what the input provides
_actuallift, actualpitch, actualrhythm, _actualnotes = split_symbols(
[
"note-E4#_eighth|rest_eighth|note-G4_eighth|rest_quarter|note-C5_eighth\trest_quarter|note-C5_eighth|rest_eighth|note-E4#_eighth|note-G4_eighth"
]
)
pitch_and_rhythm = [
entry[0] if entry[0] != "nonote" else entry[1]
for entry in zip(actualpitch[0], actualrhythm[0], strict=True)
]
self.assertEqual(
pitch_and_rhythm,
[
"note-C5",
"|",
"note-G4",
"|",
"note-E4",
"|",
"rest_eighth",
"|",
"rest_quarter",
"note-C5",
"|",
"note-G4",
"|",
"note-E4",
"|",
"rest_eighth",
"|",
"rest_quarter",
],
)
def test_split_sorts_notes_and_rests_with_different_natural_designation(self) -> None:
self.maxDiff = None
# Replace the + with \t as this is what the input provides
_actuallift, actualpitch, actualrhythm, _actualnotes = split_symbols(
["note-E#4_eighth|note-EN5_eighth"]
)
self.assertEqual(actualpitch, [["note-E5", "nonote", "note-E4"]])
def test_split_restores_accidentals(self) -> None:
"""
The semantic encoding doesn't tell us which accidentals are present in the image.
The best we can do is to restore this information from
the lift symbols and the key information.
"""
merged_accidentals = [
"clef-G2 keySignature-FM timeSignature-4/4 rest-sixteenth note-A3_sixteenth note-C4_sixteenth note-F4_sixteenth note-A4_sixteenth note-C4_sixteenth note-F4_sixteenth rest-sixteenth note-A3_sixteenth note-A3_sixteenth note-C4_sixteenth note-F4_sixteenth note-A4_sixteenth note-C4_sixteenth note-F4_sixteenth rest-sixteenth note-A3_sixteenth rest-sixteenth note-A3_quarter.. note-A3_quarter.. barline rest-sixteenth note-C4_sixteenth note-Eb4_sixteenth note-F4_sixteenth note-C5_sixteenth note-Eb4_sixteenth note-F4_sixteenth rest-sixteenth note-C4_sixteenth note-C4_sixteenth note-D4_sixteenth note-F#4_sixteenth note-C5_sixteenth note-D4_sixteenth note-F#4_sixteenth rest-sixteenth note-C4_sixteenth rest-sixteenth note-C4_quarter.. note-C4_quarter.. barline rest-sixteenth note-C4_sixteenth note-D4_sixteenth note-A4_sixteenth note-C5_sixteenth note-D4_sixteenth note-A4_sixteenth rest-sixteenth note-C4_sixteenth note-Bb3_sixteenth note-D4_sixteenth note-G4_sixteenth note-Bb4_sixteenth note-D4_sixteenth note-G4_sixteenth rest-sixteenth note-Bb3_sixteenth rest-sixteenth note-C4_quarter.. note-Bb3_quarter.. " # noqa: E501
]
actuallift, actualpitch, _actualrhythm, _actualnotes = split_symbols(merged_accidentals)
readable_lift = [
actualpitch[0][i] + lift
for i, lift in enumerate(actuallift[0])
if lift not in ("nonote", "lift_null")
]
self.assertEqual(readable_lift, ["note-E4lift_b", "note-F4lift_#"])
def test_split_restores_natural(self) -> None:
"""
        Bugfix: Natural symbols were not present in the training set.
"""
merged_accidentals = [
"clef-G2 keySignature-GM timeSignature-4/4 note-C4_sixteenth note-F4_sixteenth note-F4_sixteenth" # noqa: E501
]
actuallift, actualpitch, _actualrhythm, _actualnotes = split_symbols(merged_accidentals)
readable_lift = [
actualpitch[0][i] + lift for i, lift in enumerate(actuallift[0]) if lift != "nonote"
]
self.assertEqual(readable_lift, ["note-C4lift_null", "note-F4lift_N", "note-F4lift_null"])
def test_replace_multirests(self) -> None:
merged_multirests = [
"multirest-1 multirest-2 multirest-3 multirest-50 multirest-100 rest-whole2"
]
_actuallift, _actualpitch, actualrhythm, _actualnotes = split_symbols(merged_multirests)
self.assertEqual(
actualrhythm,
[
[
"rest-whole",
"multirest-2",
"multirest-3",
"multirest-10",
"multirest-10",
"multirest-2",
]
],
)
def test_accidentals_dont_affect_octaves(self) -> None:
merged_accidentals = ["clef-G2 keySignature-CM note-F#4_quarter note-F#3_quarter"]
actuallift, actualpitch, _actualrhythm, _actualnotes = split_symbols(merged_accidentals)
readable_lift = [
actualpitch[0][i] + lift for i, lift in enumerate(actuallift[0]) if lift != "nonote"
]
self.assertEqual(readable_lift, ["note-F4lift_#", "note-F3lift_#"])
def test_merge_of_rests_in_chord(self) -> None:
actuallift, actualpitch, actualrhythm, _actualnotes = split_symbols(
["clef-G2|keySignature-GM|timeSignature-4/4|note-C4_sixteenth|rest_quarter"]
)
result = merge_symbols(actualrhythm, actualpitch, actuallift)
self.assertEqual(result, ["note-C4_sixteenth"])
def test_merge_of_rests_in_chord_keep_all_symbols(self) -> None:
actuallift, actualpitch, actualrhythm, _actualnotes = split_symbols(
["clef-G2|keySignature-GM|timeSignature-4/4|note-C4_sixteenth|rest_quarter"]
)
result = merge_symbols(
actualrhythm, actualpitch, actuallift, keep_all_symbols_in_chord=True
)
self.assertEqual(
result, ["note-C4_sixteenth|clef-G2|keySignature-GM|timeSignature-/4|rest_quarter"]
)
def test_split_with_naturals(self) -> None:
        # The second natural (F5N) is a courtesy accidental
actuallift, actualpitch, _actualrhythm, _actualnotes = split_symbols(
[
"clef-G2 keySignature-GM note-F5N_eighth. note-F5N_eighth. note-F5_eighth. note-F5#_eighth. note-F5_eighth. note-F5N_eighth." # noqa: E501
]
)
readable_lift = [
actualpitch[0][i] + lift
for i, lift in enumerate(actuallift[0])
if lift not in ("nonote", "lift_null")
]
self.assertEqual(
readable_lift,
["note-F5lift_N", "note-F5lift_#", "note-F5lift_N"],
)
def test_split_with_naturals_no_conversion(self) -> None:
        # The second natural (F5N) is a courtesy accidental
actuallift, actualpitch, _actualrhythm, _actualnotes = split_symbols(
[
"clef-G2 keySignature-GM note-F5N_eighth. note-F5N_eighth. note-F5_eighth. note-F5#_eighth. note-F5_eighth. note-F5N_eighth." # noqa: E501
],
convert_to_modified_semantic=False,
)
readable_lift = [
actualpitch[0][i] + lift
for i, lift in enumerate(actuallift[0])
if lift not in ("nonote", "lift_null")
]
self.assertEqual(
readable_lift,
["note-F5lift_N", "note-F5lift_#", "note-F5lift_N"],
)
| 10,888 | Python | .py | 288 | 27.475694 | 1,138 | 0.56488 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,838 | test_model.py | liebharc_homr/tests/test_model.py | import unittest
import numpy as np
from homr.bounding_boxes import RotatedBoundingBox
from homr.model import MultiStaff, Staff, StaffPoint
def make_staff(number: int) -> Staff:
y_points = [10 * i + 100 * float(number) for i in range(5)]
return Staff([StaffPoint(0.0, y_points, 0)])
def make_connection(number: int) -> RotatedBoundingBox:
rect = ((float(number), float(number)), (number, number), float(number))
contours = np.empty((0, 0))
return RotatedBoundingBox(rect, contours)
class TestModel(unittest.TestCase):
def test_multi_staff_merge(self) -> None:
staff1 = MultiStaff(
[make_staff(1), make_staff(2)], [make_connection(1), make_connection(2)]
)
staff2 = MultiStaff(
[staff1.staffs[1], make_staff(3)], [staff1.connections[1], make_connection(3)]
)
result1 = staff1.merge(staff2)
self.assertEqual(result1.staffs, [staff1.staffs[0], staff1.staffs[1], staff2.staffs[1]])
self.assertEqual(
result1.connections,
[staff1.connections[0], staff1.connections[1], staff2.connections[1]],
)
result2 = staff2.merge(staff1)
self.assertEqual(result2.staffs, [staff1.staffs[0], staff2.staffs[0], staff2.staffs[1]])
self.assertEqual(
result2.connections,
[staff2.connections[0], staff2.connections[1], staff1.connections[0]],
)
| 1,464 | Python | .py | 31 | 38.387097 | 97 | 0.644728 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,839 | test_mix_datasets.py | liebharc_homr/tests/test_mix_datasets.py | import unittest
from training.transformer.mix_datasets import mix_training_sets
class TestMixDataSets(unittest.TestCase):
    def test_mix_training_sets(self) -> None:
dataset1 = ["a", "b", "c", "d", "e", "f", "g"]
dataset2 = ["1", "2", "3", "5", "6"]
dataset3 = ["x", "y", "z"]
actual = mix_training_sets([dataset1, dataset2, dataset3], [0.5, 1.0, 1.0], 10)
self.assertEqual(len(actual), 10)
for element in dataset2:
self.assertTrue(element in actual)
for element in dataset3:
self.assertTrue(element in actual)
| 589 | Python | .py | 13 | 37.538462 | 87 | 0.606643 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,840 | symbol_error_rate.py | liebharc_homr/validation/symbol_error_rate.py | import argparse
import os
from pathlib import Path
import cv2
import editdistance # type: ignore
from homr import download_utils
from homr.simple_logging import eprint
from homr.transformer.configs import Config
from homr.transformer.staff2score import Staff2Score
from training.musescore_svg import get_position_from_multiple_svg_files
from training.music_xml import group_in_measures, music_xml_to_semantic
def calc_symbol_error_rate_for_list(dataset: list[str], config: Config) -> None:
model = Staff2Score(config, keep_all_symbols_in_chord=True)
checkpoint_file = Path(config.filepaths.checkpoint).resolve()
result_file = str(checkpoint_file).split(".")[0] + "_ser.txt"
all_sers = []
i = 0
total = len(dataset)
interesting_results: list[tuple[str, str]] = []
for sample in dataset:
img_path, semantic_path = sample.strip().split(",")
expected_str = _load_semantic_file(semantic_path)[0].strip()
image = cv2.imread(img_path)
actual = model.predict(image)[0].split("+")
actual = [
symbol for symbol in actual if not symbol.startswith("timeSignature")
] # reference data has no time signature
expected = expected_str.split("+")
actual = sort_chords(actual)
expected = sort_chords(expected)
distance = editdistance.eval(expected, actual)
ser = distance / len(expected)
all_sers.append(ser)
ser = round(100 * ser)
ser_avg = round(100 * sum(all_sers) / len(all_sers))
i += 1
is_staff_with_accidentals = "Polyphonic_tude_No" in img_path and "staff-3" in img_path
if is_staff_with_accidentals:
interesting_results.append((str.join(" ", expected), str.join(" ", actual)))
percentage = round(i / total * 100)
eprint(f"Progress: {percentage}%, SER: {ser}%, SER avg: {ser_avg}%")
for result in interesting_results:
eprint("Expected:", result[0])
eprint("Actual :", result[1])
ser_avg = round(100 * sum(all_sers) / len(all_sers))
eprint(f"Done, SER avg: {ser_avg}%")
with open(result_file, "w") as f:
f.write(f"SER avg: {ser_avg}%\n")
def _load_semantic_file(semantic_path: str) -> list[str]:
with open(semantic_path) as f:
return f.readlines()
def sort_chords(symbols: list[str]) -> list[str]:
result = []
for symbol in symbols:
result.append(str.join("|", sorted(symbol.split("|"))))
return result
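# Example (sketch): chord symbols are pipe-joined notes, so sorting each chord
# makes the comparison order-independent, e.g.
#   sort_chords(["note-E4_eighth|note-C4_eighth"]) == ["note-C4_eighth|note-E4_eighth"]
#   sort_chords(["barline"]) == ["barline"]  (symbols without "|" pass through)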
def index_folder(folder: str, index_file: str) -> None:
with open(index_file, "w") as index:
for subfolder in reversed(os.listdir(folder)):
full_name = os.path.abspath(os.path.join(folder, subfolder))
if not os.path.isdir(full_name):
continue
file = os.path.join(full_name, "music.musicxml")
semantic = music_xml_to_semantic(file)
measures = [group_in_measures(voice) for voice in semantic]
svg_files = get_position_from_multiple_svg_files(file)
number_of_voices = len(semantic)
total_number_of_measures = semantic[0].count("barline")
measures_in_svg = [sum(s.number_of_measures for s in file.staffs) for file in svg_files]
sum_of_measures_in_xml = total_number_of_measures * number_of_voices
if sum(measures_in_svg) != sum_of_measures_in_xml:
eprint(
file,
"INFO: Number of measures in SVG files",
sum(measures_in_svg),
"does not match number of measures in XML",
sum_of_measures_in_xml,
)
continue
voice = 0
total_staffs_in_previous_files = 0
for svg_file in svg_files:
for staff_idx, staff in enumerate(svg_file.staffs):
selected_measures: list[str] = []
staffs_per_voice = len(svg_file.staffs) // number_of_voices
for _ in range(staff.number_of_measures):
selected_measures.append(str.join("+", measures[voice][1].pop(0)))
prelude = measures[voice][0]
semantic_content = str.join("+", selected_measures) + "\n"
if not semantic_content.startswith("clef"):
semantic_content = prelude + semantic_content
file_number = (
total_staffs_in_previous_files
+ voice * staffs_per_voice
+ staff_idx // number_of_voices
)
file_name = f"staff-{file_number}.jpg"
staff_image = os.path.join(full_name, file_name)
with open(os.path.join(full_name, f"staff-{file_number}.semantic"), "w") as f:
f.write(semantic_content)
voice = (voice + 1) % number_of_voices
if os.path.exists(staff_image):
index.write(
staff_image
+ ","
+ os.path.join(full_name, f"staff-{file_number}.semantic")
+ "\n"
)
total_staffs_in_previous_files += len(svg_file.staffs)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Calculate symbol error rate.")
parser.add_argument("checkpoint_file", type=str, help="Path to the checkpoint file.")
args = parser.parse_args()
script_location = os.path.dirname(os.path.realpath(__file__))
data_set_location = os.path.join(script_location, "..", "datasets")
validation_data_set_location = os.path.join(data_set_location, "validation")
download_path = os.path.join(data_set_location, "validation.zip")
download_url = "https://github.com/liebharc/homr/releases/download/datasets/validation.zip"
if not os.path.exists(validation_data_set_location):
try:
eprint("Downloading validation data set")
download_utils.download_file(download_url, download_path)
download_utils.unzip_file(download_path, data_set_location)
finally:
if os.path.exists(download_path):
os.remove(download_path)
index_file = os.path.join(validation_data_set_location, "index.txt")
if not os.path.exists(index_file):
index_folder(validation_data_set_location, index_file)
with open(index_file) as f:
index = f.readlines()
config = Config()
is_dir = os.path.isdir(args.checkpoint_file)
if is_dir:
# glob recursive for all model.safetensors file in the directory
checkpoint_files = list(Path(args.checkpoint_file).rglob("model.safetensors"))
else:
checkpoint_files = [Path(args.checkpoint_file)]
for checkpoint_file in checkpoint_files:
config.filepaths.checkpoint = str(checkpoint_file)
calc_symbol_error_rate_for_list(index, config)
| 7,060 | Python | .py | 141 | 38.808511 | 100 | 0.601653 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,841 | rate_validation_result.py | liebharc_homr/validation/rate_validation_result.py | # mypy: disable-error-code="no-any-return, no-any-unimported"
import argparse
import glob
import os
import xml.etree.ElementTree as ET
import editdistance # type: ignore
import musicxml.xmlelement.xmlelement as mxl # type: ignore
from musicxml.parser.parser import _parse_node # type: ignore
from homr.simple_logging import eprint
class Note:
def __init__(self, note: mxl.XMLNote) -> None: # type: ignore
self.note = note
self.is_chord = get_child_of_type(note, mxl.XMLChord) is not None
self.pitch = get_child_of_type(note, mxl.XMLPitch)
if self.pitch:
self.step = get_child_of_type(self.pitch, mxl.XMLStep)._value
self.alter = get_child_of_type(self.pitch, mxl.XMLAlter)._value
self.octave = get_child_of_type(self.pitch, mxl.XMLOctave)._value
else:
# Rest
self.step = None
self.alter = None
self.octave = None
self.duration = get_child_of_type(note, mxl.XMLDuration)._value
def __str__(self) -> str:
return f"{self.step}-{self.octave}-{self.alter}: {self.duration}"
def __repr__(self) -> str:
return f"{self.step}-{self.octave}-{self.alter}: {self.duration}"
def __hash__(self) -> int:
return hash((self.step, self.octave, self.duration, self.alter))
def __eq__(self, __value: object) -> bool:
if not isinstance(__value, Note):
return False
return (
self.step == __value.step
and self.octave == __value.octave
and self.duration == __value.duration
and self.alter == __value.alter
)
def __lt__(self, other: "Note") -> bool:
if self.step != other.step:
return self.step < other.step
if self.octave != other.octave:
return self.octave < other.octave
if self.alter != other.alter:
return self.alter < other.alter
return self.duration < other.duration
class MusicFile:
def __init__(self, filename: str, keys: list[int], notes: list[Note]) -> None:
self.filename = filename
self.keys = keys.copy()
self.notes = notes.copy()
self.notestr = [str(note) for note in notes]
self.is_reference = "reference" in filename
def diff(self, other: "MusicFile") -> int:
notedist = editdistance.eval(self.notestr, other.notestr)
keydist = editdistance.eval(self.keys, other.keys)
keydiff_rating = 10 # Rate keydiff higher than notediff
return keydiff_rating * keydist + notedist
def __str__(self) -> str:
return str.join(" ", self.notestr)
def __repr__(self) -> str:
return str.join(" ", self.notestr)
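# Illustration of the weighting in MusicFile.diff: with keydiff_rating = 10,
# one wrong key signature costs as much as ten wrong notes, e.g. an edit
# distance of 1 on keys and 2 on notes yields 10 * 1 + 2 = 12.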
def all_files_in_folder(foldername: str) -> list[str]:
return glob.glob(os.path.join(foldername, "*"))
def get_all_direct_subfolders(foldername: str) -> list[str]:
return sorted(
[
os.path.join(foldername, f)
for f in os.listdir(foldername)
if os.path.isdir(os.path.join(foldername, f))
]
)
def is_file_empty(filename: str) -> bool:
return os.stat(filename).st_size == 0
def find_minimal_diff_against_all_other_files(
file: MusicFile, files: list[MusicFile]
) -> tuple[int | None, MusicFile | None]:
minimal_diff = None
minimal_diff_file = None
for other_file in files:
if other_file != file:
diff = diff_against_reference(file, other_file)
if minimal_diff is None or diff < minimal_diff:
minimal_diff = diff
minimal_diff_file = other_file
return minimal_diff, minimal_diff_file
def diff_against_reference(file: MusicFile, reference: MusicFile) -> int:
return file.diff(reference)
def remove_node_recursively(doc: mxl.XMLScorePartwise, node_names: list[str]) -> None:
for parent in doc.iter():
for node_name in node_names:
for child in parent.findall(node_name):
parent.remove(child)
def parse_musicxml(filename: str) -> mxl.XMLScorePartwise:
tree = ET.ElementTree()
tree.parse(filename)
root = tree.getroot()
remove_node_recursively(root, ["miscellaneous", "sound"])
node: mxl.XMLScorePartwise = _parse_node(root)
return node
def get_child_of_type(node: mxl.XMLElement, xml_type: type) -> mxl.XMLElement:
children = [child for child in node.get_children() if isinstance(child, xml_type)]
if len(children) == 0:
return None
return children[0]
def get_all_measures(node: mxl.XMLScorePartwise) -> list[mxl.XMLMeasure]:
parts = [part for part in node.get_leaves() if isinstance(part, mxl.XMLPart)]
measures = [
measure
for part in parts
for measure in part.get_children()
if isinstance(measure, mxl.XMLMeasure)
]
return measures
def get_all_keys_from_measures(measures: list[mxl.XMLMeasure]) -> list[int]:
def get_fifth(key: mxl.XMLKey) -> int:
fifths = get_child_of_type(key, mxl.XMLFifths)
if fifths is None:
return 0
return fifths._value
keys = [
key
for measure in measures
for attribute in measure.get_children()
if isinstance(attribute, mxl.XMLAttributes)
for key in attribute.get_children()
if isinstance(key, mxl.XMLKey)
]
return [get_fifth(key) for key in keys]
def get_all_notes_from_measure(measure: list[mxl.XMLMeasure]) -> list[Note]:
notes = [note for note in measure.get_children() if isinstance(note, mxl.XMLNote)] # type: ignore
return sort_notes_in_chords([Note(note) for note in notes])
def sort_notes_in_chords(notes: list[Note]) -> list[Note]:
"""
Notes in a chord are not sorted in music XML. In order to compare them, we need to sort them.
    We use the pitch as the sort criterion.
"""
chords: list[list[Note]] = []
for note in notes:
if note.is_chord:
chords[-1].append(note)
else:
chords.append([note])
sorted_chords = [sorted(chord) for chord in chords]
flattened_chords = [note for chord in sorted_chords for note in chord]
return flattened_chords
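# Example (sketch): for a chord encoded as G4, C4, E4 (where the second and
# third notes carry the <chord/> flag), the group is reordered by pitch to
# C4, E4, G4, while standalone notes keep their position in the sequence.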
def get_all_notes_from_measures(measures: list[mxl.XMLMeasure]) -> list[Note]:
return [note for measure in measures for note in get_all_notes_from_measure(measure)]
def get_keys_and_notes_from_filename(filename: str) -> MusicFile:
xml = parse_musicxml(filename)
measures = get_all_measures(xml)
keys = get_all_keys_from_measures(measures)
notes = get_all_notes_from_measures(measures)
file = MusicFile(filename, keys, notes)
return file
def is_xml_or_musicxml(filename: str) -> bool:
return filename.endswith((".xml", ".musicxml"))
def rate_folder(foldername: str) -> tuple[float | None, int]:
files = all_files_in_folder(foldername)
all_diffs = []
sum_of_failures = 0
xmls = []
for file in files:
if not is_xml_or_musicxml(file):
continue
        if is_file_empty(file):
eprint(">>> Found empty file, that means that the run failed", os.path.basename(file))
sum_of_failures += 1
continue
xmls.append(get_keys_and_notes_from_filename(file))
if len(xmls) <= 1:
eprint("Not enough files found to compare", foldername)
sum_of_failures += len(xmls)
return None, sum_of_failures
reference = [xml for xml in xmls if xml.is_reference]
folder_base_name = os.path.basename(foldername.rstrip(os.path.sep))
if len(reference) != 1:
for xml in xmls:
minimal_diff, minimal_diff_file = find_minimal_diff_against_all_other_files(xml, xmls)
if minimal_diff is None or minimal_diff_file is None:
eprint("No minimal diff found for", xml.filename)
sum_of_failures += 1
continue
all_diffs.append(minimal_diff)
else:
for xml in xmls:
if xml.is_reference:
continue
diff = diff_against_reference(xml, reference[0])
all_diffs.append(diff)
average_diff = sum(all_diffs) / len(all_diffs)
eprint("In folder", folder_base_name, ": Average diff is", average_diff)
return average_diff, sum_of_failures
def write_validation_result_for_folder(
foldername: str, diffs: float, failures: int, lines: list[str]
) -> None:
with open(os.path.join(foldername, "validation_result.txt"), "w") as f:
for line in lines:
f.write(line + "\n")
f.write("Diffs: " + str(diffs) + "\n")
f.write("Failures: " + str(failures) + "\n")
def rate_all_folders(foldername: str) -> bool:
folders = get_all_direct_subfolders(foldername)
if len(folders) == 0:
return False
all_diffs = []
sum_of_failures = 0
lines = []
for folder in folders:
diffs, failures = rate_folder(folder)
if diffs is not None:
all_diffs.append(diffs)
folder_base_name = os.path.basename(folder)
lines.append(folder_base_name + ": " + str(diffs) + ", " + str(failures))
sum_of_failures += failures
if len(all_diffs) == 0:
eprint("Everything failed")
return True
average_diff = sum(all_diffs) / len(all_diffs)
write_validation_result_for_folder(foldername, average_diff, sum_of_failures, lines)
eprint()
for line in lines:
eprint(line)
eprint("Average diff:", average_diff)
eprint("Sum of failures:", sum_of_failures)
return True
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Rate validation results.")
parser.add_argument(
"folder", type=str, help="The folder to rate. If 'latest', the newest folder will be rated."
)
args = parser.parse_args()
if not rate_all_folders(args.folder):
rate_folder(args.folder)
| 10,211 | Python | .py | 235 | 34.744681 | 103 | 0.623631 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,842 | title_detection.py | liebharc_homr/homr/title_detection.py | import re
import easyocr # type: ignore
from homr.debug import Debug
from homr.model import Staff
reader = easyocr.Reader(["de", "en"], gpu=False, verbose=False)
def cleanup_text(text: str) -> str:
"""
Remove all special characters from the text. Merge multiple whitespaces into a single space.
"""
return re.sub(r"[^a-zA-Z0-9]+", " ", text).strip()
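# Example (sketch): runs of non-alphanumeric characters collapse into a single
# space, e.g. cleanup_text(" Sonata  No. 5! ") == "Sonata No 5". Note that
# non-ASCII letters are stripped as well: cleanup_text("Für Elise") == "F r Elise".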
def detect_title(debug: Debug, top_staff: Staff) -> str:
image = debug.original_image
height = int(15 * top_staff.average_unit_size)
y = max(int(top_staff.min_y) - height, 0)
x = max(int(top_staff.min_x) - 50, 0)
width = int(top_staff.max_x - top_staff.min_x) + 100
width = min(width, image.shape[1] - x)
height = min(height, image.shape[0] - y)
above_staff = image[y : y + height, x : x + width]
tesseract_input = debug.write_model_input_image("_tesseract_input.png", above_staff)
result = reader.readtext(tesseract_input, detail=0, paragraph=True)
if len(result) == 0:
return ""
return cleanup_text(result[0])
| 1,075 | Python | .py | 24 | 39.125 | 97 | 0.651208 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,843 | staff_parsing.py | liebharc_homr/homr/staff_parsing.py | import cv2
import numpy as np
from homr import constants
from homr.debug import Debug
from homr.image_utils import crop_image_and_return_new_top
from homr.model import InputPredictions, MultiStaff, Note, NoteGroup, Staff
from homr.results import (
ResultChord,
ResultClef,
ResultMeasure,
ResultStaff,
ResultTimeSignature,
move_pitch_to_clef,
)
from homr.simple_logging import eprint
from homr.staff_dewarping import StaffDewarping, dewarp_staff_image
from homr.staff_parsing_tromr import parse_staff_tromr
from homr.type_definitions import NDArray
def _have_all_the_same_number_of_staffs(staffs: list[MultiStaff]) -> bool:
for staff in staffs:
if len(staff.staffs) != len(staffs[0].staffs):
return False
return True
def _is_close_to_image_top_or_bottom(staff: MultiStaff, predictions: InputPredictions) -> bool:
tolerance = 50
closest_distance_to_top_or_bottom = [
min(s.min_x, predictions.preprocessed.shape[0] - s.max_x) for s in staff.staffs
]
return min(closest_distance_to_top_or_bottom) < tolerance
def _ensure_same_number_of_staffs(
staffs: list[MultiStaff], predictions: InputPredictions
) -> list[MultiStaff]:
if _have_all_the_same_number_of_staffs(staffs):
return staffs
if len(staffs) > 2: # noqa: PLR2004
if _is_close_to_image_top_or_bottom(
staffs[0], predictions
) and _have_all_the_same_number_of_staffs(staffs[1:]):
eprint("Removing first system from all voices, as it has a different number of staffs")
return staffs[1:]
if _is_close_to_image_top_or_bottom(
staffs[-1], predictions
) and _have_all_the_same_number_of_staffs(staffs[:-1]):
eprint("Removing last system from all voices, as it has a different number of staffs")
return staffs[:-1]
result: list[MultiStaff] = []
for staff in staffs:
result.extend(staff.break_apart())
return sorted(result, key=lambda s: s.staffs[0].min_y)
def _get_number_of_voices(staffs: list[MultiStaff]) -> int:
return len(staffs[0].staffs)
tr_omr_max_height = 128
tr_omr_max_width = 1280
def get_tr_omr_canvas_size(
image_shape: tuple[int, ...], margin_top: int = 0, margin_bottom: int = 0
) -> NDArray:
tr_omr_max_height_with_margin = tr_omr_max_height - margin_top - margin_bottom
tr_omr_ratio = float(tr_omr_max_height_with_margin) / tr_omr_max_width
height, width = image_shape[:2]
# Calculate the new size such that it fits exactly into the
# tr_omr_max_height and tr_omr_max_width
# while maintaining the aspect ratio of height and width.
if height / width > tr_omr_ratio:
# The height is the limiting factor.
new_shape = [
int(width / height * tr_omr_max_height_with_margin),
tr_omr_max_height_with_margin,
]
else:
# The width is the limiting factor.
new_shape = [tr_omr_max_width, int(height / width * tr_omr_max_width)]
return np.array(new_shape)
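# Example (sketch), assuming zero margins: tr_omr_ratio is 128 / 1280 = 0.1,
# so a 200x3000 (height x width) staff crop is width-limited and maps to
# [1280, 85], while a 300x1000 crop is height-limited and maps to [426, 128].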
def center_image_on_canvas(
image: NDArray, canvas_size: NDArray, margin_top: int = 0, margin_bottom: int = 0
) -> NDArray:
resized = cv2.resize(image, canvas_size) # type: ignore
new_image = np.zeros((tr_omr_max_height, tr_omr_max_width, 3), np.uint8)
new_image[:, :] = (255, 255, 255)
# Copy the resized image into the center of the new image.
x_offset = 0
tr_omr_max_height_with_margin = tr_omr_max_height - margin_top - margin_bottom
y_offset = (tr_omr_max_height_with_margin - resized.shape[0]) // 2 + margin_top
new_image[y_offset : y_offset + resized.shape[0], x_offset : x_offset + resized.shape[1]] = (
resized
)
return new_image
def add_image_into_tr_omr_canvas(
image: NDArray, margin_top: int = 0, margin_bottom: int = 0
) -> NDArray:
new_shape = get_tr_omr_canvas_size(image.shape, margin_top, margin_bottom)
new_image = center_image_on_canvas(image, new_shape, margin_top, margin_bottom)
return new_image
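# Usage sketch: continuing the example above, a 200x3000 crop is resized to
# 85x1280 and pasted onto a white 128x1280 canvas, left-aligned (x_offset = 0)
# and vertically centered (y_offset = (128 - 85) // 2 = 21).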
def copy_image_in_center_of_double_the_height_and_white_background(image: NDArray) -> NDArray:
height, width = image.shape[:2]
new_image = np.zeros((height * 2, width, 3), np.uint8)
new_image[:, :] = (255, 255, 255)
new_image[height // 2 : height // 2 + height, :] = image
return new_image
def remove_black_contours_at_edges_of_image(bgr: NDArray, unit_size: float) -> NDArray:
gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 97, 255, cv2.THRESH_BINARY)
thresh = 255 - thresh # type: ignore
contours, _hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
threshold = constants.black_spot_removal_threshold(unit_size)
for cnt in contours:
x, y, w, h = cv2.boundingRect(cnt)
if w < threshold or h < threshold:
continue
is_at_edge_of_image = x == 0 or y == 0 or x + w == bgr.shape[1] or y + h == bgr.shape[0]
if not is_at_edge_of_image:
continue
average_gray_intensity = 127
is_mostly_dark = np.mean(thresh[y : y + h, x : x + w]) < average_gray_intensity # type: ignore
if is_mostly_dark:
continue
bgr[y : y + h, x : x + w] = (255, 255, 255)
return bgr
def _get_min_max_y_position_of_notes(staff: Staff) -> tuple[float, float]:
min_y = staff.min_y - 2.5 * staff.average_unit_size
max_y = staff.max_y + 2.5 * staff.average_unit_size
for symbol in staff.symbols:
if isinstance(symbol, NoteGroup):
for note in symbol.notes:
min_y = min(min_y, note.center[1] - staff.average_unit_size)
max_y = max(max_y, note.center[1] + staff.average_unit_size)
elif isinstance(symbol, Note):
min_y = min(min_y, symbol.center[1] - staff.average_unit_size)
max_y = max(max_y, symbol.center[1] + staff.average_unit_size)
return min_y, max_y
def _calculate_region(staff: Staff, x_values: NDArray, y_values: NDArray) -> NDArray:
x_min = min(*x_values, staff.min_x) - 2 * staff.average_unit_size
x_max = max(*x_values, staff.max_x) + 2 * staff.average_unit_size
staff_min_y, staff_max_y = _get_min_max_y_position_of_notes(staff)
y_min = min(*(y_values - 0.5 * staff.average_unit_size), staff_min_y)
y_max = max(*(y_values + 0.5 * staff.average_unit_size), staff_max_y)
return np.array([int(x_min), int(y_min), int(x_max), int(y_max)])
def _calculate_offsets(staff: Staff, ranges: list[float]) -> list[float]:
staff_center = (staff.max_y + staff.min_y) // 2
y_offsets = []
staff_above = max([r for r in ranges if r < staff_center], default=-1)
if staff_above >= 0:
y_offsets.append(staff.max_y - staff_above)
staff_below = min([r for r in ranges if r > staff_center], default=-1)
if staff_below >= 0:
y_offsets.append(staff_below - staff.min_y)
return y_offsets
def _adjust_region(region: NDArray, y_offsets: list[float], staff: Staff) -> NDArray:
if len(y_offsets) > 0:
min_y_offset = min(y_offsets)
if (
min_y_offset > 3 * staff.average_unit_size
and min_y_offset < 8 * staff.average_unit_size
):
region[1] = int(staff.min_y - min_y_offset)
region[3] = int(staff.max_y + min_y_offset)
return region
def prepare_staff_image(
debug: Debug,
index: int,
ranges: list[float],
staff: Staff,
predictions: InputPredictions,
perform_dewarp: bool = True,
) -> tuple[NDArray, Staff]:
centers = [s.center for s in staff.symbols]
x_values = np.array([c[0] for c in centers])
y_values = np.array([c[1] for c in centers])
region = _calculate_region(staff, x_values, y_values)
y_offsets = _calculate_offsets(staff, ranges)
region = _adjust_region(region, y_offsets, staff)
staff_image = predictions.preprocessed
image_dimensions = get_tr_omr_canvas_size(
(int(region[3] - region[1]), int(region[2] - region[0]))
)
scaling_factor = image_dimensions[1] / (region[3] - region[1])
staff_image = cv2.resize(
staff_image,
(int(staff_image.shape[1] * scaling_factor), int(staff_image.shape[0] * scaling_factor)),
)
region = np.round(region * scaling_factor)
if perform_dewarp:
eprint("Dewarping staff", index)
region_step1 = np.array(region) + np.array([-10, -50, 10, 50])
staff_image, top_left = crop_image_and_return_new_top(staff_image, *region_step1)
region_step2 = np.array(region) - np.array([*top_left, *top_left])
top_left = top_left / scaling_factor
staff = _dewarp_staff(staff, None, top_left, scaling_factor)
dewarp = dewarp_staff_image(staff_image, staff, index, debug)
staff_image = (255 * dewarp.dewarp(staff_image)).astype(np.uint8)
staff_image, top_left = crop_image_and_return_new_top(staff_image, *region_step2)
scaling_factor = 1
eprint("Dewarping staff", index, "done")
else:
staff_image, top_left = crop_image_and_return_new_top(staff_image, *region)
staff_image = remove_black_contours_at_edges_of_image(staff_image, staff.average_unit_size)
staff_image = center_image_on_canvas(staff_image, image_dimensions)
debug.write_image_with_fixed_suffix(f"_staff-{index}_input.jpg", staff_image)
if debug.debug:
transformed_staff = _dewarp_staff(staff, dewarp, top_left, scaling_factor)
transformed_staff_image = staff_image.copy()
for symbol in transformed_staff.symbols:
center = symbol.center
cv2.circle(transformed_staff_image, (int(center[0]), int(center[1])), 5, (0, 0, 255))
if isinstance(symbol, NoteGroup):
for note in symbol.notes:
cv2.circle(
transformed_staff_image,
(int(note.center[0]), int(note.center[1])),
3,
(255, 255, 0),
)
cv2.putText(
transformed_staff_image,
type(symbol).__name__,
(int(center[0]), int(center[1])),
cv2.FONT_HERSHEY_SIMPLEX,
0.3,
(0, 0, 255),
1,
)
debug.write_image_with_fixed_suffix(
f"_staff-{index}_debug_annotated.jpg", transformed_staff_image
)
return staff_image, staff
def _dewarp_staff(
staff: Staff, dewarp: StaffDewarping | None, region: NDArray, scaling: float
) -> Staff:
"""
Applies the same transformation on the staff coordinates as we did on the image.
"""
def transform_coordinates(point: tuple[float, float]) -> tuple[float, float]:
x, y = point
x -= region[0]
y -= region[1]
if dewarp is not None:
x, y = dewarp.dewarp_point((x, y))
x = x * scaling
y = y * scaling
return x, y
return staff.transform_coordinates(transform_coordinates)
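# Sketch of the mapping: with dewarp=None and scaling=1.0 this reduces to a
# pure translation, e.g. a symbol at (120, 80) with crop origin (100, 50) maps
# to (20, 30); otherwise the point is dewarped and then scaled after the shift.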
def parse_staff_image(
debug: Debug, ranges: list[float], index: int, staff: Staff, predictions: InputPredictions
) -> ResultStaff | None:
staff_image, transformed_staff = prepare_staff_image(
debug, index, ranges, staff, predictions, perform_dewarp=True
)
attention_debug = debug.build_attention_debug(staff_image, f"_staff-{index}_output.jpg")
eprint("Running TrOmr inference on staff image", index)
result = parse_staff_tromr(
staff_image=staff_image,
staff=transformed_staff,
debug=attention_debug,
)
if attention_debug is not None:
attention_debug.write()
return result
def _pick_dominant_clef(staff: ResultStaff) -> ResultStaff: # noqa: C901, PLR0912
clefs = [clef for clef in staff.get_symbols() if isinstance(clef, ResultClef)]
clef_types = [clef.clef_type for clef in clefs]
if len(clef_types) == 0:
return staff
most_frequent_clef_type = max(set(clef_types), key=clef_types.count)
if most_frequent_clef_type is None:
return staff
if clef_types.count(most_frequent_clef_type) == 1:
return staff
circle_of_fifth = 0 # doesn't matter if we only look at the clef type
most_frequent_clef = ResultClef(most_frequent_clef_type, circle_of_fifth)
last_clef_was_originally = None
for symbol in staff.get_symbols():
if isinstance(symbol, ResultClef):
last_clef_was_originally = ResultClef(symbol.clef_type, 0)
symbol.clef_type = most_frequent_clef_type
elif isinstance(symbol, ResultChord):
for note in symbol.notes:
note.pitch = move_pitch_to_clef(
note.pitch, last_clef_was_originally, most_frequent_clef
)
elif isinstance(symbol, ResultMeasure):
for measure_symbol in symbol.symbols:
                if isinstance(measure_symbol, ResultClef):
                    last_clef_was_originally = ResultClef(measure_symbol.clef_type, 0)
                    measure_symbol.clef_type = most_frequent_clef_type
elif isinstance(measure_symbol, ResultChord):
for note in measure_symbol.notes:
note.pitch = move_pitch_to_clef(
note.pitch, last_clef_was_originally, most_frequent_clef
)
return staff
def _pick_dominant_key_signature(staff: ResultStaff) -> ResultStaff:
clefs = [clef for clef in staff.get_symbols() if isinstance(clef, ResultClef)]
key_signatures = [clef.circle_of_fifth for clef in clefs]
if len(key_signatures) == 0:
return staff
most_frequent_key = max(set(key_signatures), key=key_signatures.count)
if most_frequent_key is None:
return staff
if key_signatures.count(most_frequent_key) == 1:
return staff
for clef in clefs:
clef.circle_of_fifth = most_frequent_key
return staff
def _remove_redundant_clefs(measures: list[ResultMeasure]) -> None:
last_clef = None
for measure in measures:
for symbol in measure.symbols:
if isinstance(symbol, ResultClef):
if last_clef is not None and last_clef == symbol:
measure.remove_symbol(symbol)
else:
last_clef = symbol
def _remove_all_but_first_time_signature(measures: list[ResultMeasure]) -> None:
"""
The transformer tends to hallucinate time signatures. In most cases there is only one
time signature at the beginning, so we remove all others.
"""
last_sig = None
for measure in measures:
for symbol in measure.symbols:
if isinstance(symbol, ResultTimeSignature):
if last_sig is not None:
measure.remove_symbol(symbol)
else:
last_sig = symbol
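# Example (sketch): a recognized sequence [4/4, notes, 3/4, notes] collapses
# to [4/4, notes, notes]; only the first time signature survives.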
def merge_and_clean(staffs: list[ResultStaff], force_single_clef_type: bool) -> ResultStaff:
"""
Merge all staffs of a voice into a single staff.
"""
result = ResultStaff([])
for staff in staffs:
result = result.merge(staff)
if force_single_clef_type:
_pick_dominant_clef(result)
_pick_dominant_key_signature(result)
_remove_redundant_clefs(result.measures)
_remove_all_but_first_time_signature(result.measures)
result.measures = [measure for measure in result.measures if not measure.is_empty()]
return result
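# Usage sketch (see parse_staffs below): the per-line ResultStaff objects of
# one voice are merged into a single staff; force_single_clef_type is True for
# single-voice scores, where a mid-piece clef change is assumed to be a
# recognition error.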
def determine_ranges(staffs: list[MultiStaff]) -> list[float]:
staff_centers = []
for voice in staffs:
for staff in voice.staffs:
staff_centers.append((staff.max_y + staff.min_y) // 2)
staff_centers = sorted(staff_centers)
return staff_centers
def remember_new_line(measures: list[ResultMeasure]) -> None:
if len(measures) > 0:
measures[0].is_new_line = True
def parse_staffs(
debug: Debug, staffs: list[MultiStaff], predictions: InputPredictions
) -> list[ResultStaff]:
"""
Dewarps each staff and then runs it through an algorithm which extracts
the rhythm and pitch information.
"""
staffs = _ensure_same_number_of_staffs(staffs, predictions)
# For simplicity we call every staff in a multi staff a voice,
# even if it's part of a grand staff.
number_of_voices = _get_number_of_voices(staffs)
i = 0
ranges = determine_ranges(staffs)
voices = []
for voice in range(number_of_voices):
staffs_for_voice = [staff.staffs[voice] for staff in staffs]
result_for_voice = []
for staff in staffs_for_voice:
if len(staff.symbols) == 0:
continue
result_staff = parse_staff_image(debug, ranges, i, staff, predictions)
if result_staff is None:
eprint("Staff was filtered out", i)
i += 1
continue
if result_staff.is_empty():
eprint("Skipping empty staff", i)
i += 1
continue
remember_new_line(result_staff.measures)
result_for_voice.append(result_staff)
i += 1
# Piano music can have a change of clef, while for other instruments
# we assume that the clef is the same for all staffs.
# The number of voices is the only way we can distinguish between the two.
force_single_clef_type = number_of_voices == 1
voices.append(merge_and_clean(result_for_voice, force_single_clef_type))
return voices
| 17,986 | Python | .py | 386 | 37.466321 | 104 | 0.625057 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,844 | staff_dewarping.py | liebharc_homr/homr/staff_dewarping.py | import cv2
import numpy as np
import PIL.Image
from skimage import transform
from homr.debug import Debug
from homr.model import Staff
from homr.simple_logging import eprint
from homr.type_definitions import NDArray
class StaffDewarping:
def __init__(self, tform: transform.PiecewiseAffineTransform | None):
self.tform = tform
def dewarp(self, image: NDArray, fill_color: int = 1, order: int = 1) -> NDArray:
if self.tform is None:
return image
return transform.warp( # type: ignore
image,
self.tform.inverse,
output_shape=image.shape,
mode="constant",
order=order,
cval=fill_color,
)
def dewarp_point(self, point: tuple[float, float]) -> tuple[float, float]:
if self.tform is None:
return point
return self.tform(point) # type: ignore
def is_point_on_image(pts: tuple[int, int], image: NDArray) -> bool:
height, width = image.shape[:2]
margin = 10
if pts[0] < margin or pts[0] > width - margin or pts[1] < margin or pts[1] > height - margin:
return False
return True
def calculate_span_and_optimal_points(
staff: Staff, image: NDArray
) -> tuple[list[list[tuple[int, int]]], list[list[tuple[int, int]]]]:
span_points: list[list[tuple[int, int]]] = []
optimal_points: list[list[tuple[int, int]]] = []
first_y_offset = None
number_of_y_intervals = 6
if int(image.shape[0] / number_of_y_intervals) == 0:
return span_points, optimal_points
for y in range(2, image.shape[0] - 2, int(image.shape[0] / number_of_y_intervals)):
line_points: list[tuple[int, int]] = []
for x in range(2, image.shape[1], 80):
y_values = staff.get_at(x)
if y_values is not None:
y_offset = y_values.y[2]
if not first_y_offset:
first_y_offset = y_offset
y_delta = 0
else:
y_delta = y_offset - first_y_offset
point = (x, y + y_delta)
if is_point_on_image(point, image):
line_points.append(point)
minimum_number_of_points = 2
if len(line_points) > minimum_number_of_points:
average_y = sum([p[1] for p in line_points]) / len(line_points)
span_points.append(line_points)
optimal_points.append([(p[0], int(average_y)) for p in line_points])
return span_points, optimal_points
class FastPiecewiseAffineTransform(transform.PiecewiseAffineTransform):
"""
From https://github.com/scikit-image/scikit-image/pull/6963/files
"""
def __call__(self, coords): # type: ignore
coords = np.asarray(coords)
simplex = self._tesselation.find_simplex(coords)
affines = np.stack([affine.params for affine in self.affines])[simplex]
points = np.c_[coords, np.ones((coords.shape[0], 1))]
result = np.einsum("ikj,ij->ik", affines, points)
result[simplex == -1, :] = -1
result = result[:, :2]
return result
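# Sketch of the speed-up: instead of transforming points one by one, the
# override looks up the containing simplex for all coordinates at once and
# applies the stacked affine matrices in a single einsum; points outside the
# tesselation (simplex == -1) are mapped to -1.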
def calculate_dewarp_transformation(
image: NDArray,
source: list[list[tuple[int, int]]],
destination: list[list[tuple[int, int]]],
fast: bool = False,
) -> StaffDewarping:
def add_image_edges_to_lines(
lines: list[list[tuple[int, int]]],
) -> list[list[tuple[int, int]]]:
lines.insert(0, [(0, 0), (0, image.shape[1])])
lines.append([(image.shape[0], 0), (image.shape[0], image.shape[1])])
return lines
def add_first_and_last_point_to_every_line(
lines: list[list[tuple[int, int]]],
) -> list[list[tuple[int, int]]]:
for line in lines:
line.insert(0, (0, line[0][1]))
line.append((image.shape[1], line[-1][1]))
return lines
source = add_image_edges_to_lines(add_first_and_last_point_to_every_line(source))
destination = add_image_edges_to_lines(add_first_and_last_point_to_every_line(destination))
# Convert your points to numpy arrays
source_conc = np.concatenate(source)
destination_conc = np.concatenate(destination)
tform = FastPiecewiseAffineTransform() if fast else transform.PiecewiseAffineTransform() # type: ignore
tform.estimate(source_conc, destination_conc) # type: ignore
return StaffDewarping(tform)
def dewarp_staff_image(image: NDArray, staff: Staff, index: int, debug: Debug) -> StaffDewarping:
try:
span_points, optimal_points = calculate_span_and_optimal_points(staff, image)
if debug.debug:
debug_img = image.copy()
for line in span_points:
for point in line:
cv2.circle(debug_img, [int(point[0]), int(point[1])], 5, (0, 0, 255), -1)
for line in optimal_points:
for point in line:
cv2.circle(debug_img, [int(point[0]), int(point[1])], 5, (255, 0, 0), -1)
debug.write_image_with_fixed_suffix(f"_staff-{index}_debug_span_points.png", debug_img)
return calculate_dewarp_transformation(image, span_points, optimal_points)
except Exception as e:
eprint("Dewarping failed for staff", index, "with error", e)
return StaffDewarping(None)
def warp_image_randomly(image: PIL.Image.Image) -> PIL.Image.Image:
array = np.array(image)
result = warp_image_array_randomly(array)
return PIL.Image.fromarray(result)
def warp_image_array_randomly(image: NDArray) -> NDArray:
center = (image.shape[1] // 2, image.shape[0] // 2)
num_points = 3
upper = [(i * image.shape[1] // num_points, 0) for i in range(num_points)]
source = [(i * image.shape[1] // num_points, center[1]) for i in range(num_points)]
lower = [(i * image.shape[1] // num_points, image.shape[0]) for i in range(num_points)]
max_random_offset = 10
destination = [
(
i * image.shape[1] // num_points,
center[1] + np.random.randint(-max_random_offset, max_random_offset),
)
for i in range(num_points)
]
result = calculate_dewarp_transformation(
image, [upper, source, lower], [upper, destination, lower], fast=True
).dewarp(image, order=3)
return (255 * result).astype(np.uint8)
if __name__ == "__main__":
import sys
image = cv2.imread(sys.argv[1])
cv2.imwrite(sys.argv[2], warp_image_array_randomly(image))
| 6,448 | Python | .py | 143 | 36.972028 | 108 | 0.62207 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,845 | circle_of_fifths.py | liebharc_homr/homr/circle_of_fifths.py | import abc
from abc import ABC
from homr.simple_logging import eprint
circle_of_fifth_notes_positive = ["F", "C", "G", "D", "A", "E", "B"]
circle_of_fifth_notes_negative = list(reversed(circle_of_fifth_notes_positive))
def get_circle_of_fifth_notes(circle_of_fifth: int) -> list[str]:
if circle_of_fifth >= 0:
return circle_of_fifth_notes_positive[0:circle_of_fifth]
else:
return circle_of_fifth_notes_negative[0 : abs(circle_of_fifth)]
definition = {
-7: "CbM",
-6: "GbM",
-5: "DbM",
-4: "AbM",
-3: "EbM",
-2: "BbM",
-1: "FM",
0: "CM",
1: "GM",
2: "DM",
3: "AM",
4: "EM",
5: "BM",
6: "F#M",
7: "C#M",
}
inv_definition = {v: k for k, v in definition.items()}
def circle_of_fifth_to_key_signature(circle: int) -> str:
return definition[circle]
def key_signature_to_circle_of_fifth(key_signature: str) -> int:
if key_signature not in inv_definition:
eprint("Warning: Unknown key signature", key_signature)
return 0
return inv_definition[key_signature]
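# Hedged sanity check (added, not part of the original file): "DM" has two
# sharps, so its circle-of-fifths value is 2 and its sharps read off the
# positive note list; a one-flat key reads off the negative list.
def _demo_circle_of_fifth() -> None:
    assert circle_of_fifth_to_key_signature(2) == "DM"
    assert key_signature_to_circle_of_fifth("DM") == 2
    assert get_circle_of_fifth_notes(2) == ["F", "C"]
    assert get_circle_of_fifth_notes(-1) == ["B"]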
def repeat_note_for_all_octaves(notes: list[str]) -> list[str]:
"""
Takes a list of notes and returns a list of notes that includes all octaves.
"""
result = []
for note in notes:
for octave in range(11):
result.append(note + str(octave))
return result
class AbstractKeyTransformation(ABC):
@abc.abstractmethod
def add_accidental(self, note: str, accidental: str) -> str:
pass
@abc.abstractmethod
def reset_at_end_of_measure(self) -> "AbstractKeyTransformation":
pass
class NoKeyTransformation(AbstractKeyTransformation):
def __init__(self) -> None:
self.current_accidentals: dict[str, str] = {}
def add_accidental(self, note: str, accidental: str) -> str:
if accidental != "" and (
note not in self.current_accidentals or self.current_accidentals[note] != accidental
):
self.current_accidentals[note] = accidental
return accidental
else:
return ""
def reset_at_end_of_measure(self) -> "NoKeyTransformation":
return NoKeyTransformation()
class KeyTransformation(AbstractKeyTransformation):
def __init__(self, circle_of_fifth: int):
self.circle_of_fifth = circle_of_fifth
self.sharps: set[str] = set()
self.flats: set[str] = set()
if circle_of_fifth > 0:
self.sharps = set(
repeat_note_for_all_octaves(circle_of_fifth_notes_positive[0:circle_of_fifth])
)
elif circle_of_fifth < 0:
self.flats = set(
repeat_note_for_all_octaves(
circle_of_fifth_notes_negative[0 : abs(circle_of_fifth)]
)
)
def add_accidental(self, note: str, accidental: str | None) -> str:
"""
Returns the accidental if it wasn't placed before.
"""
if accidental in ["#", "b", "N"]:
previous_accidental = "N"
if note in self.sharps:
self.sharps.remove(note)
previous_accidental = "#"
if note in self.flats:
self.flats.remove(note)
previous_accidental = "b"
if accidental == "#":
self.sharps.add(note)
elif accidental == "b":
self.flats.add(note)
return accidental if accidental != previous_accidental else ""
else:
if note in self.sharps:
self.sharps.remove(note)
return "N"
if note in self.flats:
self.flats.remove(note)
return "N"
return ""
def reset_at_end_of_measure(self) -> "KeyTransformation":
return KeyTransformation(self.circle_of_fifth)
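# Hedged usage sketch (added, not part of the original file): in G major
# (circle_of_fifth=1) every F is sharp by default, so a printed sharp on F4 is
# redundant, while a natural must be printed once and then holds for the rest
# of the measure.
def _demo_key_transformation() -> None:
    key = KeyTransformation(1)
    assert key.add_accidental("F4", "#") == ""  # implied by the key signature
    key = KeyTransformation(1)
    assert key.add_accidental("F4", None) == "N"  # natural must be printed
    assert key.add_accidental("F4", None) == ""  # only once per measure
    key = key.reset_at_end_of_measure()
    assert key.add_accidental("F4", None) == "N"  # key signature applies again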
| 4,000 | Python | .py | 106 | 27.90566 | 97 | 0.573514 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,846 | xml_generator.py | liebharc_homr/homr/xml_generator.py | import musicxml.xmlelement.xmlelement as mxl # type: ignore
from homr import constants
from homr.results import (
DurationModifier,
ResultChord,
ResultClef,
ResultMeasure,
ResultNote,
ResultStaff,
ResultTimeSignature,
)
class XmlGeneratorArguments:
def __init__(self, large_page: bool | None, metronome: int | None, tempo: int | None):
self.large_page = large_page
self.metronome = metronome
self.tempo = tempo
def build_work(title_text: str) -> mxl.XMLWork: # type: ignore
work = mxl.XMLWork()
title = mxl.XMLWorkTitle()
title._value = title_text
work.add_child(title)
return work
def build_defaults(args: XmlGeneratorArguments) -> mxl.XMLDefaults: # type: ignore
if not args.large_page:
return mxl.XMLDefaults()
# These values are larger than a letter or A4 format so that
    # we only have to break staffs with every new detected staff.
    # This works well for electronic formats; if the results are supposed
    # to be printed, then they might need to be scaled down to fit the page.
page_width = 110 # Unit is in tenths: https://www.w3.org/2021/06/musicxml40/musicxml-reference/elements/page-height/
page_height = 300
defaults = mxl.XMLDefaults()
page_layout = mxl.XMLPageLayout()
    page_height_element = mxl.XMLPageHeight(value_=page_height)
    page_width_element = mxl.XMLPageWidth(value_=page_width)
    page_layout.add_child(page_height_element)
    page_layout.add_child(page_width_element)
defaults.add_child(page_layout)
return defaults
def get_part_id(index: int) -> str:
return "P" + str(index + 1)
def build_part_list(staffs: int) -> mxl.XMLPartList: # type: ignore
part_list = mxl.XMLPartList()
for part in range(staffs):
part_id = get_part_id(part)
score_part = mxl.XMLScorePart(id=part_id)
part_name = mxl.XMLPartName(value_="")
score_part.add_child(part_name)
score_instrument = mxl.XMLScoreInstrument(id=part_id + "-I1")
instrument_name = mxl.XMLInstrumentName(value_="Piano")
score_instrument.add_child(instrument_name)
instrument_sound = mxl.XMLInstrumentSound(value_="keyboard.piano")
score_instrument.add_child(instrument_sound)
score_part.add_child(score_instrument)
midi_instrument = mxl.XMLMidiInstrument(id=part_id + "-I1")
midi_instrument.add_child(mxl.XMLMidiChannel(value_=1))
midi_instrument.add_child(mxl.XMLMidiProgram(value_=1))
midi_instrument.add_child(mxl.XMLVolume(value_=100))
midi_instrument.add_child(mxl.XMLPan(value_=0))
score_part.add_child(midi_instrument)
part_list.add_child(score_part)
return part_list
def build_or_get_attributes(measure: mxl.XMLMeasure) -> mxl.XMLAttributes: # type: ignore
for child in measure.get_children_of_type(mxl.XMLAttributes):
return child
attributes = mxl.XMLAttributes()
measure.add_child(attributes)
return attributes
def build_clef(model_clef: ResultClef, attributes: mxl.XMLAttributes) -> None: # type: ignore
attributes.add_child(mxl.XMLDivisions(value_=constants.duration_of_quarter))
key = mxl.XMLKey()
fifth = mxl.XMLFifths(value_=model_clef.circle_of_fifth)
attributes.add_child(key)
key.add_child(fifth)
clef = mxl.XMLClef()
attributes.add_child(clef)
clef.add_child(mxl.XMLSign(value_=model_clef.clef_type.sign))
clef.add_child(mxl.XMLLine(value_=model_clef.clef_type.line))
def build_time_signature( # type: ignore
model_time_signature: ResultTimeSignature, attributes: mxl.XMLAttributes
) -> None:
time = mxl.XMLTime()
attributes.add_child(time)
time.add_child(mxl.XMLBeats(value_=str(model_time_signature.numerator)))
time.add_child(mxl.XMLBeatType(value_=str(model_time_signature.denominator)))
def build_rest(model_rest: ResultChord) -> mxl.XMLNote: # type: ignore
note = mxl.XMLNote()
note.add_child(mxl.XMLRest(measure="yes"))
note.add_child(mxl.XMLDuration(value_=model_rest.duration.duration))
note.add_child(mxl.XMLType(value_=model_rest.duration.duration_name))
note.add_child(mxl.XMLStaff(value_=1))
return note
def build_note(model_note: ResultNote, is_chord: bool = False) -> mxl.XMLNote:  # type: ignore
note = mxl.XMLNote()
if is_chord:
note.add_child(mxl.XMLChord())
pitch = mxl.XMLPitch()
model_pitch = model_note.pitch
pitch.add_child(mxl.XMLStep(value_=model_pitch.step))
if model_pitch.alter is not None:
pitch.add_child(mxl.XMLAlter(value_=model_pitch.alter))
else:
pitch.add_child(mxl.XMLAlter(value_=0))
pitch.add_child(mxl.XMLOctave(value_=model_pitch.octave))
note.add_child(pitch)
model_duration = model_note.duration
note.add_child(mxl.XMLType(value_=model_duration.duration_name))
note.add_child(mxl.XMLDuration(value_=model_duration.duration))
note.add_child(mxl.XMLStaff(value_=1))
note.add_child(mxl.XMLVoice(value_="1"))
if model_duration.modifier == DurationModifier.DOT:
note.add_child(mxl.XMLDot())
elif model_duration.modifier == DurationModifier.TRIPLET:
time_modification = mxl.XMLTimeModification()
time_modification.add_child(mxl.XMLActualNotes(value_=3))
time_modification.add_child(mxl.XMLNormalNotes(value_=2))
note.add_child(time_modification)
return note
def build_note_group(note_group: ResultChord) -> list[mxl.XMLNote]: # type: ignore
result = []
is_first = True
for note in note_group.notes:
result.append(build_note(note, not is_first))
is_first = False
return result
def build_chord(chord: ResultChord) -> list[mxl.XMLNote]: # type: ignore
if chord.is_rest:
return [build_rest(chord)]
return build_note_group(chord)
def build_add_time_direction(args: XmlGeneratorArguments) -> mxl.XMLDirection | None: # type: ignore
if not args.metronome:
return None
direction = mxl.XMLDirection()
direction_type = mxl.XMLDirectionType()
direction.add_child(direction_type)
metronome = mxl.XMLMetronome()
direction_type.add_child(metronome)
beat_unit = mxl.XMLBeatUnit(value_="quarter")
metronome.add_child(beat_unit)
per_minute = mxl.XMLPerMinute(value_=str(args.metronome))
metronome.add_child(per_minute)
if args.tempo:
direction.add_child(mxl.XMLSound(tempo=args.tempo))
else:
direction.add_child(mxl.XMLSound(tempo=args.metronome))
return direction
def build_measure( # type: ignore
args: XmlGeneratorArguments, measure: ResultMeasure, is_first_part: bool, measure_number: int
) -> mxl.XMLMeasure:
result = mxl.XMLMeasure(number=str(measure_number))
is_first_measure = measure_number == 1
if is_first_measure and is_first_part:
direction = build_add_time_direction(args)
if direction:
result.add_child(direction)
if measure.is_new_line and not is_first_measure:
result.add_child(mxl.XMLPrint(new_system="yes"))
for symbol in measure.symbols:
if isinstance(symbol, ResultClef):
attributes = build_or_get_attributes(result)
build_clef(symbol, attributes)
elif isinstance(symbol, ResultTimeSignature):
attributes = build_or_get_attributes(result)
build_time_signature(symbol, attributes)
elif isinstance(symbol, ResultChord):
for element in build_chord(symbol):
result.add_child(element)
return result
def build_part( # type: ignore
args: XmlGeneratorArguments, staff: ResultStaff, index: int
) -> mxl.XMLPart:
part = mxl.XMLPart(id=get_part_id(index))
measure_number = 1
is_first_part = index == 0
for measure in staff.measures:
part.add_child(build_measure(args, measure, is_first_part, measure_number))
measure_number += 1
return part
def generate_xml( # type: ignore
args: XmlGeneratorArguments, staffs: list[ResultStaff], title: str
) -> mxl.XMLElement:
root = mxl.XMLScorePartwise()
root.add_child(build_work(title))
root.add_child(build_defaults(args))
root.add_child(build_part_list(len(staffs)))
for index, staff in enumerate(staffs):
root.add_child(build_part(args, staff, index))
return root
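# Hedged usage sketch (added, not part of the original file): renders a single
# measure with a treble clef and one quarter note. ResultPitch, ResultDuration
# and ClefType come from homr.results like the symbols imported above; actual
# serialization (e.g. a to_string() call on the returned element) is assumed
# from the musicxml package and not exercised here.
def _demo_generate_xml() -> mxl.XMLElement:  # type: ignore
    from homr.results import ClefType, ResultDuration, ResultPitch

    quarter = ResultDuration(constants.duration_of_quarter)
    chord = ResultChord(quarter, [ResultNote(ResultPitch("C", 4, None), quarter)])
    measure = ResultMeasure([ResultClef(ClefType.treble(), 0), chord])
    args = XmlGeneratorArguments(large_page=None, metronome=None, tempo=None)
    return generate_xml(args, [ResultStaff([measure])], "Demo")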
| 8,535 | Python | .py | 188 | 38.271277 | 122 | 0.688972 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,847 | staff_parsing_tromr.py | liebharc_homr/homr/staff_parsing_tromr.py | import re
from collections import Counter
import cv2
import numpy as np
from homr import constants
from homr.debug import AttentionDebug
from homr.model import Staff
from homr.results import ClefType, ResultStaff, ResultTimeSignature
from homr.simple_logging import eprint
from homr.tr_omr_parser import TrOMRParser
from homr.transformer.configs import default_config
from homr.transformer.staff2score import Staff2Score
from homr.type_definitions import NDArray
inference: Staff2Score | None = None
def parse_staff_tromr(
staff: Staff, staff_image: NDArray, debug: AttentionDebug | None
) -> ResultStaff | None:
return predict_best(staff_image, debug=debug, staff=staff)
def apply_clahe(staff_image: NDArray, clip_limit: float = 2.0, kernel_size: int = 8) -> NDArray:
gray_image = cv2.cvtColor(staff_image, cv2.COLOR_BGR2GRAY)
clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=(kernel_size, kernel_size))
gray_image = clahe.apply(gray_image)
return cv2.cvtColor(gray_image, cv2.COLOR_GRAY2BGR)
def build_image_options(staff_image: NDArray) -> list[NDArray]:
denoised1 = cv2.fastNlMeansDenoisingColored(staff_image, None, 10, 10, 7, 21)
return [
staff_image,
denoised1,
apply_clahe(denoised1),
]
def _fill_in_time_signature(staff: ResultStaff) -> None:
average_measure_length = np.median([m.length_in_quarters() for m in staff.measures])
for symbol in staff.get_symbols():
if isinstance(symbol, ResultTimeSignature):
beat_duration = 4 / symbol.denominator * constants.duration_of_quarter
symbol.numerator = round(average_measure_length / beat_duration)
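# Added worked numbers (illustrative): with constants.duration_of_quarter = 16
# ticks, a staff whose measures have a median length of 48 ticks and a detected
# x/4 meter gets numerator round(48 / 16) = 3, i.e. 3/4 time.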
def predict_best(
org_image: NDArray, staff: Staff, debug: AttentionDebug | None = None
) -> ResultStaff | None:
global inference # noqa: PLW0603
if inference is None:
inference = Staff2Score(default_config)
images = build_image_options(org_image)
notes = staff.get_notes_and_groups()
best_distance: float = 0
best_attempt = 0
best_result: ResultStaff = ResultStaff([])
for attempt, image in enumerate(images):
if debug is not None:
debug.reset()
result = inference.predict(
image,
debug=debug,
)
parser = TrOMRParser()
result_staff = parser.parse_tr_omr_output(str.join("", result))
clef_type = _get_clef_type(result[0])
if clef_type is None:
            # Returning early if no clef is found is not optimal,
# but it makes sure that we get a result and it's a corner case,
# which is not worth the effort to handle right now.
eprint("Failed to find clef type in", result)
return result_staff
actual = [symbol for symbol in result[0].split("+") if symbol.startswith("note")]
expected = [note.to_tr_omr_note(clef_type) for note in notes]
actual = _flatten_result(actual)
expected = _flatten_result(expected)
distance = _differences(actual, expected)
diff_accidentals = abs(
_number_of_accidentals_in_model(staff) - parser.number_of_accidentals()
)
measure_length_variance = _measure_length_variance(result_staff)
number_of_structural_elements = (
_superfluous_number(parser.number_of_clefs())
+ _superfluous_number(parser.number_of_key_signatures())
+ _superfluous_number(parser.number_of_time_signatures())
)
total_rating = (
distance + diff_accidentals + measure_length_variance + number_of_structural_elements
) / max(min(len(expected), len(actual)), 1)
if best_result.is_empty() or total_rating < best_distance:
best_distance = total_rating
best_result = result_staff
best_attempt = attempt
_fill_in_time_signature(best_result)
eprint("Taking attempt", best_attempt + 1, "with distance", best_distance, best_result)
return best_result
def _superfluous_number(count: int) -> int:
"""
Assumes that the item should be present at most once.
"""
return count - 1 if count > 1 else 0
def _number_of_accidentals_in_model(staff: Staff) -> int:
return len(staff.get_accidentals())
def _get_clef_type(result: str) -> ClefType | None:
match = re.search(r"clef-([A-G])([0-9])", result)
if match is None:
return None
return ClefType(match.group(1), int(match.group(2)))
def _flatten_result(result: list[str]) -> list[str]:
notes = []
for group in result:
for symbol in group.split("|"):
just_pitch = symbol.split("_")[0]
just_pitch = just_pitch.replace("#", "").replace("b", "")
notes.append(just_pitch)
return notes
def _measure_length_variance(result: ResultStaff) -> float:
durations = [m.length_in_quarters() for m in result.measures]
return float(np.std(durations - np.mean(durations))) # type: ignore
def _differences(actual: list[str], expected: list[str]) -> int:
counter1 = Counter(actual)
counter2 = Counter(expected)
return sum((counter1 - counter2).values()) + sum((counter2 - counter1).values())
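# Hedged example (added, not part of the original file): Counter subtraction
# keeps only positive counts, so summing both directions yields a symmetric
# multiset difference; ["C4", "E4"] vs ["C4", "G4"] differ by 2.
def _demo_differences() -> None:
    assert _differences(["C4", "E4"], ["C4", "G4"]) == 2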
| 5,206 | Python | .py | 114 | 38.929825 | 97 | 0.676808 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,848 | simple_logging.py | liebharc_homr/homr/simple_logging.py | import sys
from typing import Any
def eprint(*args: Any, **kwargs: Any) -> None:
"""
    A logger with different log levels felt overkill for this project.
So we just have one logger that logs to stderr.
"""
print(*args, file=sys.stderr, **kwargs) # noqa: T201
| 289 | Python | .py | 8 | 31.125 | 70 | 0.66426 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,849 | constants.py | liebharc_homr/homr/constants.py | number_of_lines_on_a_staff = 5
max_number_of_ledger_lines = 4
def tolerance_for_staff_line_detection(unit_size: float) -> float:
return unit_size / 3
def max_line_gap_size(unit_size: float) -> float:
return 5 * unit_size
def is_short_line(unit_size: float) -> float:
return unit_size / 5
def is_short_connected_line(unit_size: float) -> float:
return 2 * unit_size
def min_height_for_brace_rough(unit_size: float) -> float:
return 2 * unit_size
def max_width_for_brace_rough(unit_size: float) -> float:
return 3 * unit_size
def min_height_for_brace(unit_size: float) -> float:
return 4 * unit_size
def tolerance_for_touching_bar_lines(unit_size: float) -> int:
return int(round(unit_size * 2))
def tolerance_for_touching_clefs(unit_size: float) -> int:
return int(round(unit_size * 2))
def tolerance_for_staff_at_any_point(unit_size: float) -> int:
return 0
def tolerance_note_grouping(unit_size: float) -> float:
return 1 * unit_size
def bar_line_max_width(unit_size: float) -> float:
return 2 * unit_size
def bar_line_min_height(unit_size: float) -> float:
return 3 * unit_size
def bar_line_to_staff_tolerance(unit_size: float) -> float:
return 4 * unit_size
def black_spot_removal_threshold(unit_size: float) -> float:
return 2 * unit_size
staff_line_segment_x_tolerance = 10
notehead_type_threshold = 0.8
max_color_distance_of_staffs = 0.25
# We don't have to worry about mis-detections,
# because if not all staffs group the same way then we break the staffs up again
minimum_connections_to_form_combined_staff = 1
duration_of_quarter = 16
image_noise_limit = 50
staff_position_tolerance = 50
max_angle_for_lines_to_be_parallel = 5
NOTEHEAD_SIZE_RATIO = 1.285714 # width/height
def minimum_rest_width_or_height(unit_size: float) -> float:
return 0.7 * unit_size
def maximum_rest_width_or_height(unit_size: float) -> float:
return 3.5 * unit_size
def minimum_accidental_width_or_height(unit_size: float) -> float:
return 0.5 * unit_size
def maximum_accidental_width_or_height(unit_size: float) -> float:
return 3 * unit_size
# We use ³ as triplet indicator as it's not a valid duration name
# or note name and thus we have no risk of confusion
triplet_symbol = "³"
| 2,408 | Python | .py | 54 | 39.222222 | 81 | 0.712189 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,850 | bounding_boxes.py | liebharc_homr/homr/bounding_boxes.py | import math
from abc import ABC, abstractmethod
from collections.abc import Sequence
from typing import Any, TypeVar, cast
import cv2
import cv2.typing as cvt
import numpy as np
from scipy import ndimage # type: ignore
from homr import constants
from homr.image_utils import crop_image
from homr.simple_logging import eprint
from homr.type_definitions import NDArray
TBounds = TypeVar("TBounds", bound="RotatedBoundingBox | BoundingBox | BoundingEllipse")
def rotate_point_around_center(
point: tuple[float, float], center: tuple[float, float], angle: float
) -> tuple[float, float]:
return (
point[0] * np.cos(angle) - point[1] * np.sin(angle) + center[0],
point[0] * np.sin(angle) + point[1] * np.cos(angle) + center[1],
)
def calculate_edges_of_rotated_rectangle(
box: cvt.RotatedRect,
) -> tuple[tuple[float, float], tuple[float, float], tuple[float, float], tuple[float, float]]:
half_size = np.array([box[1][0] / 2, box[1][1] / 2])
center = box[0]
top_left = center - half_size
bottom_left = center + np.array([-half_size[0], half_size[1]])
top_right = center + np.array([half_size[0], -half_size[1]])
bottom_right = center + half_size
return (
(top_left[0], top_left[1]),
(bottom_left[0], bottom_left[1]),
(top_right[0], top_right[1]),
(bottom_right[0], bottom_right[1]),
)
def do_polygons_overlap(poly1: cvt.MatLike, poly2: cvt.MatLike) -> bool:
# Check if any point of one ellipse is inside the other ellipse
for point in poly1:
if cv2.pointPolygonTest(poly2, (float(point[0]), float(point[1])), False) >= 0:
return True
for point in poly2:
if cv2.pointPolygonTest(poly1, (float(point[0]), float(point[1])), False) >= 0:
return True
return False
class DebugDrawable(ABC):
@abstractmethod
def draw_onto_image(self, img: NDArray, color: tuple[int, int, int] = (0, 0, 255)) -> None:
pass
class AnyPolygon(DebugDrawable):
def __init__(self, polygon: Any):
self.polygon = polygon
class BoundingBox(AnyPolygon):
"""
A bounding box in the format of (x1, y1, x2, y2)
"""
def __init__(self, box: cvt.Rect, contours: cvt.MatLike, debug_id: int = 0):
self.debug_id = debug_id
self.contours = contours
self.box = box
self.center = (box[0] + box[2]) / 2, (box[1] + box[3]) / 2
self.size = (box[2] - box[0], box[3] - box[1])
self.rotated_box = (self.center, self.size, 0)
self.size = (box[2] - box[0], box[3] - box[1])
super().__init__(cv2.boxPoints(self.rotated_box).astype(np.int64))
def draw_onto_image(self, img: NDArray, color: tuple[int, int, int] = (0, 0, 255)) -> None:
cv2.rectangle(
img,
(int(self.box[0]), int(self.box[1])),
(int(self.box[2]), int(self.box[3])),
color,
2,
)
def rotate_and_extract(self, img: NDArray, angle: float) -> NDArray:
rotated = ndimage.rotate(
crop_image(img, self.box[0], self.box[1], self.box[2] + 1, self.box[3] + 1), angle
)
return cast(NDArray, rotated)
def extract(self, img: NDArray) -> NDArray:
return crop_image(img, self.box[0], self.box[1], self.box[2] + 1, self.box[3] + 1)
def increase_height(self, y_top: int, y_bottom: int) -> "BoundingBox":
return BoundingBox(
(
self.box[0],
min(self.box[1], int(y_top)),
self.box[2],
max(self.box[1], int(y_bottom)),
),
self.contours,
self.debug_id,
)
def increase_width(self, x_left: int, x_right: int) -> "BoundingBox":
return BoundingBox(
(
min(self.box[0], int(x_left)),
self.box[1],
max(self.box[0], int(x_right)),
self.box[3],
),
self.contours,
self.debug_id,
)
def split_into_quadrants(self) -> list["BoundingBox"]:
"""
Splits the bounding box into four equally sized quadrants.
It returns them in the order: top left, top right, bottom left, bottom right
"""
x_center = int(self.box[0] + self.size[0] / 2)
y_center = int(self.box[1] + self.size[1] / 2)
return [
BoundingBox(
(self.box[0], self.box[1], x_center, y_center), self.contours, self.debug_id
),
BoundingBox(
(x_center, self.box[1], self.box[2], y_center), self.contours, self.debug_id
),
BoundingBox(
(self.box[0], y_center, x_center, self.box[3]), self.contours, self.debug_id
),
BoundingBox(
(x_center, y_center, self.box[2], self.box[3]), self.contours, self.debug_id
),
]
class AngledBoundingBox(AnyPolygon):
def __init__(
self, box: cvt.RotatedRect, contours: cvt.MatLike, polygon: Any, debug_id: int = 0
):
super().__init__(polygon)
self.debug_id = debug_id
self.contours = contours
angle = box[2]
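        # Added explanatory note: OpenCV's RotatedRect angle convention is
        # ambiguous, so the branches below fold the angle into roughly
        # [-45, 45] degrees, swapping width and height for quarter-turn
        # rotations so that the size keeps its (horizontal, vertical) reading.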
self.box: cvt.RotatedRect
if angle > 135: # noqa: PLR2004
angle = angle - 180
self.box = ((box[0][0], box[0][1]), (box[1][0], box[1][1]), angle)
elif angle < -135: # noqa: PLR2004
angle = angle + 180
self.box = ((box[0][0], box[0][1]), (box[1][0], box[1][1]), angle)
elif angle > 45: # noqa: PLR2004
angle = angle - 90
self.box = ((box[0][0], box[0][1]), (box[1][1], box[1][0]), angle)
elif angle < -45: # noqa: PLR2004
angle = angle + 90
self.box = ((box[0][0], box[0][1]), (box[1][1], box[1][0]), angle)
else:
self.box = ((box[0][0], box[0][1]), (box[1][0], box[1][1]), angle)
self.center = self.box[0]
self.size = self.box[1]
self.angle = self.box[2]
self.top_left, self.bottom_left, self.top_right, self.bottom_right = (
calculate_edges_of_rotated_rectangle(self.box)
)
self.polygon = polygon
def is_overlapping(self, other: AnyPolygon) -> bool:
if not self._can_shapes_possibly_touch(other):
return False
return do_polygons_overlap(self.polygon, other.polygon)
def is_overlapping_with_any(self, others: Sequence["AngledBoundingBox"]) -> bool:
for other in others:
if self.is_overlapping(other):
return True
return False
def _can_shapes_possibly_touch(self, other: "AnyPolygon") -> bool:
"""
A fast check if the two shapes can possibly touch. If this returns False,
the two shapes do not touch.
If this returns True, the two shapes might touch and further checks are necessary.
"""
# Get the centers and major axes of the rectangles
center1, axes1, _ = self.box
center2: Sequence[float]
axes2: Sequence[float]
if isinstance(other, BoundingBox):
            center2, axes2, _ = other.rotated_box  # ((cx, cy), (w, h), 0)
elif isinstance(other, AngledBoundingBox):
            center2, axes2, _ = other.box  # ((cx, cy), (w, h), angle)
else:
raise ValueError(f"Unknown type {type(other)}")
major_axis1 = max(axes1)
major_axis2 = max(axes2)
# Calculate the distance between the centers
distance = ((center1[0] - center2[0]) ** 2 + (center1[1] - center2[1]) ** 2) ** 0.5
# If the distance is greater than the sum of the major axes, the rectangles do not overlap
if distance > major_axis1 + major_axis2:
return False
return True
@abstractmethod
def draw_onto_image(self, img: NDArray, color: tuple[int, int, int] = (0, 0, 255)) -> None:
pass
@abstractmethod
def extract_point_sequence_from_image(self, img: NDArray) -> NDArray:
pass
def get_color_ratio(self, img: NDArray) -> float:
"""
Gets the ratio of white to total pixels for this bounding box in the image.
"""
colors = self.extract_point_sequence_from_image(img)
white = len([color for color in colors if color == 1])
total = len(colors)
ratio = white / total
return ratio
def crop_rect_from_image(self, img: NDArray) -> NDArray:
return crop_image(
img,
self.top_left[0],
self.top_left[1],
self.bottom_right[0] + 1,
self.bottom_right[1] + 1,
)
class RotatedBoundingBox(AngledBoundingBox):
def __init__(self, box: cvt.RotatedRect, contours: cvt.MatLike, debug_id: int = 0):
super().__init__(box, contours, cv2.boxPoints(box).astype(np.int64), debug_id)
def draw_onto_image(self, img: NDArray, color: tuple[int, int, int] = (0, 0, 255)) -> None:
box = cv2.boxPoints(self.box).astype(np.int64)
cv2.drawContours(img, [box], 0, color, 2)
def is_intersecting(self, other: "RotatedBoundingBox") -> bool:
# TODO: How is this different from is_overlapping?
return cv2.rotatedRectangleIntersection(self.box, other.box)[0] != cv2.INTERSECT_NONE
def is_overlapping_extrapolated(self, other: "RotatedBoundingBox", unit_size: float) -> bool:
return self._get_intersection_point_extrapolated(other, unit_size) is not None
def make_box_thicker(self, thickness: int) -> "RotatedBoundingBox":
if thickness <= 0:
return self
return RotatedBoundingBox(
(
(self.box[0][0], self.box[0][1]),
(self.box[1][0] + thickness, self.box[1][1] + thickness),
self.box[2],
),
self.contours,
self.debug_id,
)
def move_to_x_horizontal_by(self, x_delta: int) -> "RotatedBoundingBox":
new_x = self.center[0] + x_delta
return RotatedBoundingBox(
((new_x, self.center[1]), self.box[1], self.box[2]), self.contours, self.debug_id
)
def make_box_taller(self, thickness: int) -> "RotatedBoundingBox":
return RotatedBoundingBox(
(
(self.box[0][0], self.box[0][1]),
(self.box[1][0], self.box[1][1] + thickness),
self.box[2],
),
self.contours,
self.debug_id,
)
def get_center_extrapolated(self, x: float) -> float:
return (x - self.box[0][0]) * np.tan(self.box[2] / 180 * np.pi) + self.box[0][1] # type: ignore
def _get_intersection_point_extrapolated(
self, other: "RotatedBoundingBox", unit_size: float
) -> tuple[float, float] | None:
if self.box[0][0] > other.box[0][0]:
left, right = other, self
else:
left, right = self, other
center: float = float(np.mean([left.center[0], right.center[0]]))
tolerance = constants.tolerance_for_staff_line_detection(unit_size)
max_gap = constants.max_line_gap_size(unit_size)
distance_between_left_and_center_considering_size = (
center - left.center[0] - left.size[0] // 2
)
distance_between_right_and_center_considering_size = (
right.center[0] - center - right.size[0] // 2
)
if (
distance_between_left_and_center_considering_size > max_gap
or distance_between_right_and_center_considering_size > max_gap
):
return None
left_at_center = left.get_center_extrapolated(center)
right_at_center = right.get_center_extrapolated(center)
if abs(left_at_center - right_at_center) > tolerance:
return None
return (center, (left_at_center + right_at_center) / 2)
def extract_point_sequence_from_image(self, img: NDArray) -> NDArray:
rectangle = self.box
poly = cv2.boxPoints(rectangle).astype(np.int64)
# Create an empty mask
mask = np.zeros_like(img)
# Fill the polygon in the mask
cv2.fillPoly(mask, [poly], 1) # type: ignore
# Use the mask to index the image
points = img[mask == 1]
return points # type: ignore
def to_bounding_box(self) -> BoundingBox:
return BoundingBox(
(
int(self.top_left[0]),
int(self.top_left[1]),
int(self.bottom_right[0]),
int(self.bottom_right[1]),
),
self.contours,
self.debug_id,
)
class BoundingEllipse(AngledBoundingBox):
def __init__(self, box: cvt.RotatedRect, contours: cvt.MatLike, debug_id: int = 0):
super().__init__(
box,
contours,
cv2.ellipse2Poly(
(int(box[0][0]), int(box[0][1])),
(int(box[1][0] / 2), int(box[1][1] / 2)),
int(box[2]),
0,
360,
1,
),
debug_id,
)
def draw_onto_image(self, img: NDArray, color: tuple[int, int, int] = (0, 0, 255)) -> None:
cv2.ellipse(img, self.box, color=color, thickness=2)
def make_box_thicker(self, thickness: int) -> "BoundingEllipse":
return BoundingEllipse(
(
(self.box[0][0], self.box[0][1]),
(self.box[1][0] + thickness, self.box[1][1] + thickness),
self.box[2],
),
self.contours,
self.debug_id,
)
def make_box_taller(self, thickness: int) -> "RotatedBoundingBox":
return RotatedBoundingBox(
(
(self.box[0][0], self.box[0][1]),
(self.box[1][0], self.box[1][1] + thickness),
self.box[2],
),
self.contours,
self.debug_id,
)
def extract_point_sequence_from_image(self, img: NDArray) -> NDArray:
# Create an empty mask
mask = np.zeros_like(img)
# Fill the polygon in the mask
cv2.fillPoly(mask, [self.polygon], 1) # type: ignore
# Use the mask to index the image
points = img[mask == 1]
return points # type: ignore
def create_bounding_boxes(img: NDArray) -> list[BoundingBox]:
contours, _ = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
boxes = []
    for i, contour in enumerate(contours):
        boxes.append(create_bounding_box(contour, debug_id=i))
return boxes
def create_bounding_box(contour: cvt.MatLike, debug_id: int) -> BoundingBox:
x, y, w, h = cv2.boundingRect(contour)
box = (x, y, x + w, y + h)
return BoundingBox(box, contour, debug_id=debug_id)
def _has_box_valid_size(box: cvt.RotatedRect) -> bool:
return (
not math.isnan(box[1][0]) and not math.isnan(box[1][1]) and box[1][0] > 0 and box[1][1] > 0
)
def create_rotated_bounding_boxes(
img: NDArray,
skip_merging: bool = False,
min_size: tuple[int, int] | None = None,
max_size: tuple[int, int] | None = None,
thicken_boxes: int | None = None,
) -> list[RotatedBoundingBox]:
contours, _ = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
boxes: list[RotatedBoundingBox] = []
    for i, contour in enumerate(contours):
        fit_box = cv2.minAreaRect(contour)
        if not _has_box_valid_size(fit_box):
            continue
        box = RotatedBoundingBox(fit_box, contour, debug_id=i)
if min_size is not None and (box.size[0] < min_size[0] or box.size[1] < min_size[1]):
continue
if max_size is not None:
if max_size[0] > 0 and box.size[0] > max_size[0]:
continue
if max_size[1] > 0 and box.size[1] > max_size[1]:
continue
boxes.append(box)
if skip_merging:
return boxes
if thicken_boxes is not None:
boxes = [box.make_box_thicker(thicken_boxes) for box in boxes]
return _get_box_for_whole_group(merge_overlaying_bounding_boxes(boxes))
def create_rotated_bounding_box(contour: cvt.MatLike, debug_id: int) -> RotatedBoundingBox:
box = cv2.minAreaRect(contour)
return RotatedBoundingBox(box, contour, debug_id=debug_id)
def create_lines(
img: NDArray,
threshold: int = 100,
min_line_length: int = 100,
max_line_gap: int = 10,
skip_merging: bool = False,
) -> list[RotatedBoundingBox]:
lines = cv2.HoughLinesP(
img, 1, np.pi / 180, threshold, minLineLength=min_line_length, maxLineGap=max_line_gap
)
    if lines is None:  # cv2.HoughLinesP returns None when no lines are found
        return []
    boxes = []
for i, line in enumerate(lines):
x1, y1, x2, y2 = line[0]
contour = np.array([[x1, y1], [x2, y2]])
box = cv2.minAreaRect(contour)
if box[1][0] > box[1][1]:
boxes.append(RotatedBoundingBox(box, contour, debug_id=i))
if skip_merging:
return boxes
return _get_box_for_whole_group(merge_overlaying_bounding_boxes(boxes))
def create_bounding_ellipses(
img: NDArray,
skip_merging: bool = False,
min_size: tuple[int, int] | None = None,
max_size: tuple[int, int] | None = None,
) -> list[BoundingEllipse]:
contours, _ = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
boxes = []
    for i, contour in enumerate(contours):
        min_length_to_fit_ellipse = 5  # this is a requirement of OpenCV
        if len(contour) < min_length_to_fit_ellipse:
            continue
        fit_box = cv2.fitEllipse(contour)
        if not _has_box_valid_size(fit_box):
            continue
        box = BoundingEllipse(fit_box, contour, debug_id=i)
if min_size is not None and (box.size[0] < min_size[0] or box.size[1] < min_size[1]):
continue
if max_size is not None and (box.size[0] > max_size[0] or box.size[1] > max_size[1]):
continue
boxes.append(box)
if skip_merging:
return boxes
return _get_ellipse_for_whole_group(merge_overlaying_bounding_boxes(boxes))
def move_overlaying_bounding_boxes(
src: list[AngledBoundingBox], dest: list[AngledBoundingBox], dest_img: NDArray
) -> tuple[list[AngledBoundingBox], NDArray]:
"""
Every item in src which overlaps with one in dest will be transferred to dest_img
"""
result_img = dest_img.copy()
result_src = src.copy()
for dest_box in dest:
for src_box in src:
if src_box.is_overlapping(dest_box):
result_img[src_box.contours] = 1
if src_box in result_src:
result_src.remove(src_box)
return result_src, result_img
def _do_groups_overlap(group1: list[AngledBoundingBox], group2: list[AngledBoundingBox]) -> bool:
for box1 in group1:
for box2 in group2:
if box1.is_overlapping(box2):
return True
return False
def _merge_groups_recursive(
groups: list[list[AngledBoundingBox]], step: int
) -> list[list[AngledBoundingBox]]:
step_limit = 10
if step > step_limit:
eprint("Too many steps in _merge_groups_recursive, giving back current results")
return groups
number_of_changes = 0
merged: list[list[AngledBoundingBox]] = []
used_groups = set()
for i, group in enumerate(groups):
match_found = False
if i in used_groups:
continue
for j in range(i + 1, len(groups)):
if j in used_groups:
continue
other_group = groups[j]
if _do_groups_overlap(group, other_group):
merged.append(group + other_group)
number_of_changes += 1
used_groups.add(j)
match_found = True
break
if not match_found:
merged.append(group)
if number_of_changes == 0:
return merged
else:
return _merge_groups_recursive(merged, step + 1)
def get_largest_of_every_group(groups: list[list[AngledBoundingBox]]) -> list[AngledBoundingBox]:
result = []
for group in groups:
largest = max(group, key=lambda box: box.size[0] * box.size[1])
result.append(largest)
return result
def _get_ellipse_for_whole_group(groups: list[list[AngledBoundingBox]]) -> list[BoundingEllipse]:
result = []
for group in groups:
complete_contour = np.concatenate([box.contours for box in group])
box = cv2.minAreaRect(complete_contour)
result.append(BoundingEllipse(box, complete_contour))
return result
def _get_box_for_whole_group(groups: list[list[AngledBoundingBox]]) -> list[RotatedBoundingBox]:
result = []
for group in groups:
complete_contour = np.concatenate([box.contours for box in group])
box = cv2.minAreaRect(complete_contour)
result.append(RotatedBoundingBox(box, complete_contour))
return result
def merge_overlaying_bounding_boxes(
boxes: Sequence[AngledBoundingBox],
) -> list[list[AngledBoundingBox]]:
initial_groups: list[list[AngledBoundingBox]] = []
for box in boxes:
initial_groups.append([box])
return _merge_groups_recursive(initial_groups, 0)
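# Hedged sketch (added, not part of the original file): two boxes that share
# area end up in one group while a distant third stays alone; the contour is a
# minimal two-point stand-in for a real cv2 contour.
def _demo_merge_overlaying_bounding_boxes() -> None:
    def _box(cx: float, cy: float) -> RotatedBoundingBox:
        contour = np.array([[int(cx) - 2, int(cy) - 2], [int(cx) + 2, int(cy) + 2]])
        return RotatedBoundingBox(((cx, cy), (4.0, 4.0), 0.0), contour)

    groups = merge_overlaying_bounding_boxes([_box(10, 10), _box(12, 10), _box(100, 100)])
    assert sorted(len(group) for group in groups) == [1, 2]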
| 21,990 | Python | .py | 510 | 32.937255 | 105 | 0.576854 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,851 | autocrop.py | liebharc_homr/homr/autocrop.py | import cv2
import numpy as np
from homr.type_definitions import NDArray
def autocrop(img: NDArray) -> NDArray:
"""
    Finds the largest contour in the image, which is expected to be the paper of the
    sheet music, and extracts it. If no suitable contour is found, the image is assumed
    to be a full-page view of sheet music and is returned as is.
"""
# convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
hist = cv2.calcHist([img], [0], None, [256], [0, 256])
dominant_color_gray_scale = max(enumerate(hist), key=lambda x: x[1])[0]
# threshold
thresh = cv2.threshold(gray, dominant_color_gray_scale - 30, 255, cv2.THRESH_BINARY)[1]
# apply morphology
kernel = np.ones((7, 7), np.uint8)
morph = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
kernel = np.ones((9, 9), np.uint8)
morph = cv2.morphologyEx(morph, cv2.MORPH_ERODE, kernel)
# get largest contour
contours = cv2.findContours(morph, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
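    # Added note: cv2.findContours returns (contours, hierarchy) in OpenCV 4 but
    # (image, contours, hierarchy) in OpenCV 3; the length check below selects
    # the contour list in either case.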
    opencv4_tuple_length = 2
    contours = contours[0] if len(contours) == opencv4_tuple_length else contours[1]  # type: ignore
area_thresh = 0.0
big_contour = None
for c in contours:
area = cv2.contourArea(c) # type: ignore
if area > area_thresh:
area_thresh = area
big_contour = c
if big_contour is None:
return img
# get bounding box
x, y, w, h = cv2.boundingRect(big_contour) # type: ignore
page_width = img.shape[1]
page_height = img.shape[0]
# If we can't find a large contour, then we assume that the picture doesn't have page borders
is_full_page_view = x < page_width * 0.25 or y < page_height * 0.25
if is_full_page_view:
return img
# crop result
result = img[y : y + h, x : x + w]
return result
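# Hedged usage sketch (added, not part of the original file; file names are
# hypothetical placeholders):
def _demo_autocrop() -> None:
    photo = cv2.imread("sheet_photo.jpg")  # hypothetical input path
    cv2.imwrite("sheet_cropped.jpg", autocrop(photo))  # hypothetical output path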
| 1,920 | Python | .py | 44 | 36.863636 | 108 | 0.648547 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,852 | image_utils.py | liebharc_homr/homr/image_utils.py | import numpy as np
from homr.type_definitions import NDArray
def _limit_x(image: NDArray, x: float) -> int:
return max(0, min(image.shape[1] - 1, int(round(x))))
def _limit_y(image: NDArray, y: float) -> int:
return max(0, min(image.shape[0] - 1, int(round(y))))
def crop_image(image: NDArray, x1: float, y1: float, x2: float, y2: float) -> NDArray:
image, _ignored = crop_image_and_return_new_top(image, x1, y1, x2, y2)
return image
def crop_image_and_return_new_top(
image: NDArray, x1: float, y1: float, x2: float, y2: float
) -> tuple[NDArray, NDArray]:
x_min = min(x1, x2)
x_max = max(x1, x2)
y_min = min(y1, y2)
y_max = max(y1, y2)
x1_limited = _limit_x(image, x_min)
y1_limited = _limit_y(image, y_min)
x2_limited = _limit_x(image, x_max)
y2_limited = _limit_y(image, y_max)
    new_top_left = np.array([x1_limited, y1_limited])
    return image[y1_limited:y2_limited, x1_limited:x2_limited], new_top_left
| 999 | Python | .py | 22 | 39.863636 | 87 | 0.636079 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,853 | bar_line_detection.py | liebharc_homr/homr/bar_line_detection.py | import cv2
import numpy as np
from homr import constants
from homr.bounding_boxes import RotatedBoundingBox
from homr.model import BarLine, Staff
from homr.type_definitions import NDArray
def prepare_bar_line_image(image: NDArray) -> NDArray:
kernel = np.ones((5, 3), np.uint8)
result = cv2.dilate(image, kernel, iterations=1)
return result
def detect_bar_lines(
bar_lines: list[RotatedBoundingBox], unit_size: float
) -> list[RotatedBoundingBox]:
"""
Filters the bar line candidates based on their size.
"""
result = []
for bar_line in bar_lines:
if bar_line.size[1] < constants.bar_line_min_height(unit_size):
continue
if bar_line.size[0] > constants.bar_line_max_width(unit_size):
continue
result.append(bar_line)
return result
def add_bar_lines_to_staffs(
staffs: list[Staff], bar_lines: list[RotatedBoundingBox]
) -> list[BarLine]:
result = []
for staff in staffs:
for bar_line in bar_lines:
if not staff.is_on_staff_zone(bar_line):
continue
point = staff.get_at(bar_line.center[0])
if point is None:
continue
if abs(bar_line.top_left[1] - point.y[0]) > constants.bar_line_to_staff_tolerance(
point.average_unit_size
):
continue
if abs(bar_line.bottom_left[1] - point.y[-1]) > constants.bar_line_to_staff_tolerance(
point.average_unit_size
):
continue
bar_line_symbol = BarLine(bar_line)
staff.add_symbol(bar_line_symbol)
result.append(bar_line_symbol)
return result
| 1,767 | Python | .py | 47 | 28.191489 | 99 | 0.61 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,854 | rest_detection.py | liebharc_homr/homr/rest_detection.py | import numpy as np
from homr import constants
from homr.bounding_boxes import RotatedBoundingBox
from homr.model import Rest, Staff
def add_rests_to_staffs(staffs: list[Staff], rests: list[RotatedBoundingBox]) -> list[Rest]:
result = []
central_staff_line_indexes = [1, 2]
for staff in staffs:
for rest in rests:
if not staff.is_on_staff_zone(rest):
continue
point = staff.get_at(rest.center[0])
if point is None:
continue
center = rest.center
idx_of_closest_y = np.argmin(np.abs([y_value - center[1] for y_value in point.y]))
is_in_center = idx_of_closest_y in central_staff_line_indexes
if not is_in_center:
continue
minimum_width_or_height = constants.minimum_rest_width_or_height(
point.average_unit_size
)
maximum_width_or_height = constants.maximum_rest_width_or_height(
point.average_unit_size
)
if rest.size[0] < minimum_width_or_height or rest.size[1] < minimum_width_or_height:
continue
if rest.size[0] > maximum_width_or_height or rest.size[1] > maximum_width_or_height:
continue
bbox = rest.to_bounding_box()
rest_symbol = Rest(bbox)
staff.add_symbol(rest_symbol)
result.append(rest_symbol)
return result
| 1,465 | Python | .py | 34 | 31.852941 | 96 | 0.596627 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,855 | results.py | liebharc_homr/homr/results.py | from enum import Enum
import numpy as np
from homr import constants
from homr.simple_logging import eprint
class ResultSymbol:
def __init__(self) -> None:
pass
class ClefType:
@staticmethod
def treble() -> "ClefType":
return ClefType(sign="G", line=2)
@staticmethod
def bass() -> "ClefType":
return ClefType(sign="F", line=4)
def __init__(self, sign: str, line: int) -> None:
"""
Why we don't support more clef types, e.g. the other examples given in
https://www.w3.org/2021/06/musicxml40/musicxml-reference/elements/clef/:
        Since the other clef types share the same symbol with one of the ones we support,
        we would have to expect a lot of misdetections, and this would degrade the
        performance. E.g. the treble and french violin clefs
        (https://en.wikipedia.org/wiki/Clef) are easily confused: if we supported
        french violin, then we would have cases where the treble clef is detected as
        french violin and the pitch would be wrong.
        If we get more training data and a reliable detection of the rarer clef types,
we can add them here.
"""
self.sign = sign.upper()
if self.sign not in ["G", "F", "C"]:
raise Exception("Unknown clef sign " + sign)
# Extend get_reference_pitch if you add more clef types
treble_clef_line = 2
bass_clef_line = 4
alto_clef_line = 3
if sign == "G" and line != treble_clef_line:
eprint("Unsupported treble clef line", line)
self.line = treble_clef_line
elif sign == "F" and line != bass_clef_line:
eprint("Unsupported bass clef line", line)
self.line = bass_clef_line
elif sign == "C" and line != alto_clef_line:
eprint("Unsupported alto clef line", line)
self.line = alto_clef_line
else:
self.line = line
def __eq__(self, __value: object) -> bool:
if isinstance(__value, ClefType):
return self.sign == __value.sign and self.line == __value.line
else:
return False
def __hash__(self) -> int:
return hash((self.sign, self.line))
def __str__(self) -> str:
return f"{self.sign}{self.line}"
def __repr__(self) -> str:
return str(self)
def get_reference_pitch(self) -> "ResultPitch":
if self.sign == "G":
g2 = ResultPitch("C", 4, None)
return g2.move_by(2 * (self.line - 2), None)
elif self.sign == "F":
e2 = ResultPitch("E", 2, None)
return e2.move_by(2 * (self.line - 4), None)
elif self.sign == "C":
c3 = ResultPitch("C", 3, None)
return c3.move_by(2 * (self.line - 3), None)
raise ValueError("Unknown clef sign " + str(self))
class ResultTimeSignature(ResultSymbol):
def __init__(self, numerator: int, denominator: int) -> None:
self.numerator = numerator
self.denominator = denominator
def __eq__(self, __value: object) -> bool:
if isinstance(__value, ResultTimeSignature):
return self.numerator == __value.numerator and self.denominator == __value.denominator
else:
return False
def __hash__(self) -> int:
return hash((self.numerator, self.denominator))
def __str__(self) -> str:
return f"{self.numerator}/{self.denominator}"
def __repr__(self) -> str:
return str(self)
note_names = ["C", "D", "E", "F", "G", "A", "B"]
class ResultPitch:
def __init__(self, step: str, octave: int, alter: int | None) -> None:
self.step = step
self.octave = octave
self.alter = alter
def __eq__(self, __value: object) -> bool:
if isinstance(__value, ResultPitch):
return (
self.step == __value.step
and self.octave == __value.octave
and self.alter == __value.alter
)
else:
return False
def name_and_octave(self) -> str:
return self.step + str(self.octave)
def __hash__(self) -> int:
return hash((self.step, self.octave, self.alter))
def __str__(self) -> str:
alter = ""
if self.alter == 1:
alter = "#"
elif self.alter == -1:
alter = "b"
elif self.alter == 0:
alter = "â™®"
return f"{self.step}{self.octave}{alter}"
def __repr__(self) -> str:
return str(self)
def get_relative_position(self, other: "ResultPitch") -> int:
return (
(self.octave - other.octave) * 7
+ note_names.index(self.step)
- note_names.index(other.step)
)
def move_by(self, steps: int, alter: int | None) -> "ResultPitch":
step_index = (note_names.index(self.step) + steps) % 7
step = note_names[step_index]
octave = self.octave + abs(steps - step_index) // 6 * np.sign(steps)
return ResultPitch(step, octave, alter)
def get_pitch_from_relative_position(
reference_pitch: ResultPitch, relative_position: int, alter: int | None
) -> ResultPitch:
step_index = (note_names.index(reference_pitch.step) + relative_position) % 7
step = note_names[step_index]
# abs & sign give us integer division with rounding towards 0
octave = reference_pitch.octave + abs(relative_position - step_index) // 6 * np.sign(
relative_position
)
return ResultPitch(step, int(octave), alter)
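# Hedged worked example (added, not part of the original file): a treble clef's
# reference pitch is C4; two diatonic steps above it is E4, and one step below
# wraps the octave down to B3.
def _demo_pitch_arithmetic() -> None:
    reference = ClefType.treble().get_reference_pitch()
    assert str(reference) == "C4"
    assert str(get_pitch_from_relative_position(reference, 2, None)) == "E4"
    assert str(get_pitch_from_relative_position(reference, -1, None)) == "B3"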
class ResultClef(ResultSymbol):
def __init__(self, clef_type: ClefType, circle_of_fifth: int) -> None:
self.clef_type = clef_type
self.circle_of_fifth = circle_of_fifth
def __eq__(self, __value: object) -> bool:
if isinstance(__value, ResultClef):
return (
self.clef_type == __value.clef_type
and self.circle_of_fifth == __value.circle_of_fifth
)
else:
return False
def __hash__(self) -> int:
return hash((self.clef_type, self.circle_of_fifth))
def __str__(self) -> str:
return f"{self.clef_type}/{self.circle_of_fifth}"
def __repr__(self) -> str:
return str(self)
def get_reference_pitch(self) -> ResultPitch:
return self.clef_type.get_reference_pitch()
def move_pitch_to_clef(
pitch: ResultPitch, current: ResultClef | None, new: ResultClef
) -> ResultPitch:
"""
Moves the pitch from the current clef to the new clef under the assumption that the clef
was incorrectly identified, but the pitch position is correct.
"""
if current is None or new is None or current.clef_type == new.clef_type:
return pitch
current_reference_pitch = current.get_reference_pitch()
new_reference_pitch = new.get_reference_pitch()
relative_position = pitch.get_relative_position(current_reference_pitch)
return get_pitch_from_relative_position(
new_reference_pitch, relative_position, alter=pitch.alter
)
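# Hedged example (added, not part of the original file): if a staff was first
# read with a treble clef but corrected to bass, the note keeps its position on
# the staff lines, so the pitch C4 (treble reference) becomes E2 (bass reference).
def _demo_move_pitch_to_clef() -> None:
    treble = ResultClef(ClefType.treble(), 0)
    bass = ResultClef(ClefType.bass(), 0)
    assert str(move_pitch_to_clef(ResultPitch("C", 4, None), treble, bass)) == "E2"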
def _get_duration_name(duration: int) -> str:
duration_dict = {
4 * constants.duration_of_quarter: "whole",
2 * constants.duration_of_quarter: "half",
constants.duration_of_quarter: "quarter",
constants.duration_of_quarter / 2: "eighth",
constants.duration_of_quarter / 4: "16th",
constants.duration_of_quarter / 8: "32nd",
constants.duration_of_quarter / 16: "64th",
}
result = duration_dict.get(duration, None)
if result is None:
eprint("Unknown duration", duration)
return "quarter"
return result
class DurationModifier(Enum):
NONE = 0
DOT = 1
TRIPLET = 2
def __init__(self, duration: int) -> None:
self.duration = duration
def __str__(self) -> str:
if self == DurationModifier.NONE:
return ""
elif self == DurationModifier.DOT:
return "."
elif self == DurationModifier.TRIPLET:
return constants.triplet_symbol
else:
return "Invalid duration"
def _adjust_duration(duration: int, modifier: DurationModifier) -> int:
if modifier == DurationModifier.DOT:
return duration * 3 // 2
elif modifier == DurationModifier.TRIPLET:
return duration * 2 // 3
else:
return duration
class ResultDuration:
def __init__(self, base_duration: int, modifier: DurationModifier = DurationModifier.NONE):
self.duration = _adjust_duration(base_duration, modifier)
self.modifier = modifier
self.duration_name = _get_duration_name(base_duration)
def __eq__(self, __value: object) -> bool:
if isinstance(__value, ResultDuration):
return self.duration == __value.duration and self.modifier == __value.modifier
else:
return False
def __hash__(self) -> int:
return hash((self.duration, self.modifier))
def __str__(self) -> str:
return f"{self.duration_name}{str(self.modifier)}"
def __repr__(self) -> str:
return str(self)
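# Hedged worked numbers (added, not part of the original file): with
# constants.duration_of_quarter = 16 ticks, a dotted quarter lasts
# 16 * 3 // 2 = 24 ticks and a triplet eighth 8 * 2 // 3 = 5 ticks (integer
# division), while duration_name keeps the base duration's name.
def _demo_result_durations() -> None:
    dotted = ResultDuration(constants.duration_of_quarter, DurationModifier.DOT)
    assert (dotted.duration, dotted.duration_name) == (24, "quarter")
    triplet = ResultDuration(constants.duration_of_quarter // 2, DurationModifier.TRIPLET)
    assert (triplet.duration, triplet.duration_name) == (5, "eighth")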
class ResultNote:
def __init__(self, pitch: ResultPitch, duration: ResultDuration):
self.pitch = pitch
self.duration = duration
def __eq__(self, __value: object) -> bool:
if isinstance(__value, ResultNote):
return self.pitch == __value.pitch and self.duration == __value.duration
else:
return False
def __hash__(self) -> int:
return hash((self.pitch, self.duration))
def __str__(self) -> str:
return f"{self.pitch}_{self.duration}"
def __repr__(self) -> str:
return str(self)
def get_min_duration(notes: list[ResultNote]) -> ResultDuration:
if len(notes) == 0:
return ResultDuration(constants.duration_of_quarter)
return min([note.duration for note in notes], key=lambda x: x.duration)
class ResultChord(ResultSymbol):
"""
A chord which contains 0 to many pitches. 0 pitches indicates that this is a rest.
    The duration of the chord is the distance to the next chord. The individual pitches
    may have different durations.
"""
def __init__(self, duration: ResultDuration, notes: list[ResultNote]):
self.notes = notes
self.duration = duration
@property
def is_rest(self) -> bool:
return len(self.notes) == 0
def __eq__(self, __value: object) -> bool:
if isinstance(__value, ResultChord):
return self.duration == __value.duration and self.notes == __value.notes
else:
return False
def __hash__(self) -> int:
        return hash((tuple(self.notes), self.duration))  # tuple() as lists are unhashable
def __str__(self) -> str:
return f"{'&'.join(map(str, self.notes))}"
def __repr__(self) -> str:
return str(self)
class ResultMeasure:
def __init__(self, symbols: list[ResultSymbol]):
self.symbols = symbols
self.is_new_line = False
def is_empty(self) -> bool:
return len(self.symbols) == 0
def remove_symbol(self, symbol: ResultSymbol) -> None:
len_before = len(self.symbols)
self.symbols = [s for s in self.symbols if s is not symbol]
if len_before == len(self.symbols):
raise Exception("Could not remove symbol")
def __eq__(self, __value: object) -> bool:
if isinstance(__value, ResultMeasure):
if len(self.symbols) != len(__value.symbols):
return False
for i in range(len(self.symbols)):
if self.symbols[i] != __value.symbols[i]:
return False
return True
else:
return False
def __hash__(self) -> int:
return hash(tuple(self.symbols))
def __str__(self) -> str:
return f"{' '.join(map(str, self.symbols))}" + "|"
def __repr__(self) -> str:
return str(self)
def length_in_quarters(self) -> float:
return sum(
symbol.duration.duration for symbol in self.symbols if isinstance(symbol, ResultChord)
)
class ResultStaff:
def __init__(self, measures: list[ResultMeasure]):
self.measures = measures
def merge(self, other: "ResultStaff") -> "ResultStaff":
return ResultStaff(self.measures + other.measures)
def get_symbols(self) -> list[ResultSymbol]:
symbols = []
for measure in self.measures:
symbols.extend(measure.symbols)
return symbols
def number_of_new_lines(self) -> int:
return sum(1 for measure in self.measures if measure.is_new_line)
def replace_symbol(self, old_symbol: ResultSymbol, new_symbol: ResultSymbol) -> None:
for measure in self.measures:
measure.symbols = [new_symbol if s is old_symbol else s for s in measure.symbols]
def __eq__(self, __value: object) -> bool:
if isinstance(__value, ResultStaff):
if len(self.measures) != len(__value.measures):
return False
for i in range(len(self.measures)):
if self.measures[i] != __value.measures[i]:
return False
return True
else:
return False
def __hash__(self) -> int:
return hash(tuple(self.measures))
def __str__(self) -> str:
return "Staff(" + f"{' '.join(map(str, self.measures))}" + ")"
def __repr__(self) -> str:
return str(self)
def is_empty(self) -> bool:
return len(self.measures) == 0
| 13,970 | Python | .py | 327 | 33.021407 | 99 | 0.589635 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,856 | noise_filtering.py | liebharc_homr/homr/noise_filtering.py | import cv2
import numpy as np
from homr import constants
from homr.debug import Debug
from homr.model import InputPredictions
from homr.simple_logging import eprint
from homr.type_definitions import NDArray
def estimate_noise(gray: NDArray) -> float:
H, W = gray.shape
M = np.array([[1, -2, 1], [-2, 4, -2], [1, -2, 1]])
sigma = np.sum(np.sum(np.absolute(cv2.filter2D(gray, cv2.CV_64F, M)))) / (H * W)
return sigma # type: ignore
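# Added note (best-effort attribution): the 3x3 kernel above matches the
# difference-of-Laplacians mask from Immerkaer's fast noise variance estimation;
# averaging its absolute response over the image gives an (unnormalized) noise
# figure that is later compared against constants.image_noise_limit.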
def create_noise_grid(gray: NDArray, debug: Debug) -> NDArray | None: # noqa: C901, PLR0912
imgheight, imgwidth = gray.shape
M, N = imgheight // 20, imgwidth // 20
debug_image = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
mask = np.zeros(gray.shape, dtype=np.uint8)
grid = create_grid(gray, M, N)
filtered_cells, total_cells = apply_noise_filter(grid, mask, debug_image, M, N)
if debug.debug:
debug.write_image("noise_crop", debug_image)
return handle_filter_results(filtered_cells, total_cells, mask)
def create_grid(gray: NDArray, M: int, N: int) -> NDArray:
imgheight, imgwidth = gray.shape
grid = np.zeros([int(np.ceil(imgheight / M)), int(np.ceil(imgwidth / N))], dtype=np.uint8)
for i, y1 in enumerate(range(0, imgheight, M)):
for j, x1 in enumerate(range(0, imgwidth, N)):
y2, x2 = y1 + M, x1 + N
tile = gray[y1:y2, x1:x2]
noise = estimate_noise(tile)
grid[i, j] = noise
return grid
def apply_noise_filter(
grid: NDArray, mask: NDArray, debug_image: NDArray, M: int, N: int
) -> tuple[int, int]:
imgheight, imgwidth = mask.shape
filtered_cells, total_cells = 0, 0
for i, y1 in enumerate(range(0, imgheight, M)):
for j, x1 in enumerate(range(0, imgwidth, N)):
y2, x2 = y1 + M, x1 + N
noise = grid[i, j]
neighbors = get_neighbors(grid, i, j)
any_neighbor_above_limit = np.any(np.array(neighbors) > constants.image_noise_limit)
if noise > constants.image_noise_limit and any_neighbor_above_limit:
cv2.rectangle(debug_image, (x1, y1), (x2, y2), (0, 255, 255))
filtered_cells += 1
else:
mask[y1:y2, x1:x2] = 255
cv2.rectangle(debug_image, (x1, y1), (x2, y2), (0, 255, 0))
cv2.putText(
debug_image,
f"{noise:.2f}",
(x1 + N // 2, y1 + M // 2),
cv2.FONT_HERSHEY_PLAIN,
1,
(0, 0, 255),
)
total_cells += 1
return filtered_cells, total_cells
def get_neighbors(grid: NDArray, i: int, j: int) -> list[int]:
neighbors = []
if i > 0:
neighbors.append(grid[i - 1, j])
if j > 0:
neighbors.append(grid[i, j - 1])
if i < grid.shape[0] - 1:
neighbors.append(grid[i + 1, j])
if j < grid.shape[1] - 1:
neighbors.append(grid[i, j + 1])
return neighbors
def handle_filter_results(filtered_cells: int, total_cells: int, mask: NDArray) -> NDArray | None:
half = 0.5
if filtered_cells / total_cells > half:
eprint(
f"Would filter more than 50% of the image with {filtered_cells} of {total_cells} "
+ "cells, skipping noise filtering"
)
return None
elif filtered_cells > 0:
eprint(f"Filtered {filtered_cells} of {total_cells} cells")
return mask
return None
def filter_predictions(prediction: InputPredictions, debug: Debug) -> InputPredictions:
mask = create_noise_grid(255 * prediction.staff, debug)
if mask is None:
return prediction
return InputPredictions(
original=cv2.bitwise_and(prediction.original, prediction.original, mask=mask),
preprocessed=cv2.bitwise_and(prediction.preprocessed, prediction.preprocessed, mask=mask),
notehead=cv2.bitwise_and(prediction.notehead, prediction.notehead, mask=mask),
symbols=cv2.bitwise_and(prediction.symbols, prediction.symbols, mask=mask),
staff=cv2.bitwise_and(prediction.staff, prediction.staff, mask=mask),
clefs_keys=cv2.bitwise_and(prediction.clefs_keys, prediction.clefs_keys, mask=mask),
stems_rest=cv2.bitwise_and(prediction.stems_rest, prediction.stems_rest, mask=mask),
)
| 4,418 | Python | .py | 95 | 37.242105 | 99 | 0.609642 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,857 | tr_omr_parser.py | liebharc_homr/homr/tr_omr_parser.py | from homr import constants
from homr.results import (
ClefType,
DurationModifier,
ResultChord,
ResultClef,
ResultDuration,
ResultMeasure,
ResultNote,
ResultPitch,
ResultStaff,
ResultTimeSignature,
get_min_duration,
)
from homr.simple_logging import eprint
class TrOMRParser:
def __init__(self) -> None:
self._key_signatures: list[int] = []
self._time_signatures: list[str] = []
self._clefs: list[ClefType] = []
self._number_of_accidentals: int = 0
def number_of_clefs(self) -> int:
return len(self._clefs)
def number_of_key_signatures(self) -> int:
return len(self._key_signatures)
def number_of_time_signatures(self) -> int:
return len(self._time_signatures)
def number_of_accidentals(self) -> int:
"""
Returns the number of accidentals including the key signatures.
"""
return self._number_of_accidentals + sum(
[abs(key_signature) for key_signature in self._key_signatures]
)
def parse_clef(self, clef: str) -> ResultClef:
parts = clef.split("-")
clef_type_str = parts[1]
clef_type = ClefType(clef_type_str[0], int(clef_type_str[1]))
self._clefs.append(clef_type)
return ResultClef(clef_type, 0)
def parse_key_signature(self, key_signature: str, clef: ResultClef) -> None:
key_signature_mapping = {
"CbM": -7,
"GbM": -6,
"DbM": -5,
"AbM": -4,
"EbM": -3,
"BbM": -2,
"FM": -1,
"CM": 0,
"GM": 1,
"DM": 2,
"AM": 3,
"EM": 4,
"BM": 5,
"F#M": 6,
"C#M": 7,
}
signature_name = key_signature.split("-")[1]
if signature_name in key_signature_mapping:
clef.circle_of_fifth = key_signature_mapping[signature_name]
self._key_signatures.append(clef.circle_of_fifth)
else:
eprint("WARNING: Unrecognized key signature: " + signature_name)
def parse_time_signature(self, time_signature: str) -> ResultTimeSignature:
parts = time_signature.split("-")
time_abbreviation = parts[1]
numerator = 1
denominator = 1
if time_abbreviation == "C":
numerator = 4
denominator = 4
elif time_abbreviation == "C/":
numerator = 2
denominator = 2
else:
denominator = int(time_abbreviation[1:])
self._time_signatures.append(time_abbreviation)
return ResultTimeSignature(numerator, denominator)
def parse_duration_name(self, duration_name: str) -> int:
duration_mapping = {
"whole": constants.duration_of_quarter * 4,
"half": constants.duration_of_quarter * 2,
"quarter": constants.duration_of_quarter,
"eighth": constants.duration_of_quarter // 2,
"sixteenth": constants.duration_of_quarter // 4,
"thirty_second": constants.duration_of_quarter // 8,
}
return duration_mapping.get(duration_name, constants.duration_of_quarter // 16)
def parse_duration(self, duration: str) -> ResultDuration:
has_dot = duration.endswith(".")
is_triplet = duration.endswith(constants.triplet_symbol)
modifier = DurationModifier.NONE
if has_dot:
duration = duration[:-1]
modifier = DurationModifier.DOT
elif is_triplet:
duration = duration[:-1]
modifier = DurationModifier.TRIPLET
return ResultDuration(
self.parse_duration_name(duration),
modifier,
)
def parse_note(self, note: str) -> ResultNote:
try:
note_details = note.split("-")[1]
pitch_and_duration = note_details.split("_")
pitch = pitch_and_duration[0]
duration = pitch_and_duration[1]
note_name = pitch[0]
octave = int(pitch[1])
alter = None
len_with_accidental = 2
if len(pitch) > len_with_accidental:
accidental = pitch[2]
self._number_of_accidentals += 1
if accidental == "b":
alter = -1
elif accidental == "#":
alter = 1
else:
alter = 0
return ResultNote(ResultPitch(note_name, octave, alter), self.parse_duration(duration))
except Exception:
eprint("Failed to parse note: " + note)
return ResultNote(ResultPitch("C", 4, 0), ResultDuration(constants.duration_of_quarter))
def parse_notes(self, notes: str) -> ResultChord | None:
note_parts = notes.split("|")
note_parts = [note_part for note_part in note_parts if note_part.startswith("note")]
rest_parts = [rest_part for rest_part in note_parts if rest_part.startswith("rest")]
if len(note_parts) == 0:
if len(rest_parts) == 0:
return None
else:
return self.parse_rest(rest_parts[0])
result_notes = [self.parse_note(note_part) for note_part in note_parts]
return ResultChord(get_min_duration(result_notes), result_notes)
def parse_rest(self, rest: str) -> ResultChord:
rest = rest.split("|")[0]
duration = rest.split("-")[1]
return ResultChord(
self.parse_duration(duration),
[],
)
def parse_tr_omr_output(self, output: str) -> ResultStaff: # noqa: C901
parts = output.split("+")
measures = []
current_measure = ResultMeasure([])
parse_functions = {
"clef": self.parse_clef,
"timeSignature": self.parse_time_signature,
"rest": self.parse_rest,
}
for part in parts:
if part == "barline":
measures.append(current_measure)
current_measure = ResultMeasure([])
elif part.startswith("keySignature"):
if len(current_measure.symbols) > 0 and isinstance(
current_measure.symbols[-1], ResultClef
):
self.parse_key_signature(part, current_measure.symbols[-1])
elif part.startswith("multirest"):
eprint("Skipping over multirest")
elif part.startswith("note") or "|" in part:
note_result = self.parse_notes(part)
if note_result is not None:
current_measure.symbols.append(note_result)
else:
for prefix, parse_function in parse_functions.items():
if part.startswith(prefix):
current_measure.symbols.append(parse_function(part))
break
if len(current_measure.symbols) > 0:
measures.append(current_measure)
return ResultStaff(measures)
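# Illustrative round trip (not part of the original source); the token string
# follows the format this parser expects: "+"-separated semantic tokens with
# chord notes joined by "|":
#
#   parser = TrOMRParser()
#   staff = parser.parse_tr_omr_output(
#       "clef-G2+keySignature-DM+timeSignature-C+note-F4#_quarter|note-A4_quarter+barline"
#   )
#   # -> ResultStaff with one measure: treble clef (D major), 4/4 time
#   #    and a two-note chord F#4/A4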
| 7,247 | Python | .py | 176 | 29.017045 | 101 | 0.550562 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,858 | type_definitions.py | liebharc_homr/homr/type_definitions.py | from typing import Any
import numpy as np
Model = Any
NDArray = np.ndarray[Any, Any]
| 88 | Python | .py | 4 | 20.25 | 30 | 0.790123 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,859 | debug.py | liebharc_homr/homr/debug.py | import glob
import os
from collections.abc import Sequence
from itertools import chain
import cv2
import numpy as np
from homr.bounding_boxes import DebugDrawable
from homr.type_definitions import NDArray
class AttentionDebug:
def __init__(self, filename: str, image: NDArray, parent: "Debug") -> None:
self.image = image
self.destname = filename
self.attentions: list[NDArray] = []
self.parent = parent
def add_attention(self, attention: NDArray, center: tuple[float, float]) -> None:
attention_resized = cv2.resize(attention, (self.image.shape[1], self.image.shape[0]))
# Apply a colormap to the attention weights
attention_colormap = cv2.applyColorMap( # type: ignore
np.uint8(255.0 * attention_resized / attention_resized.max()), cv2.COLORMAP_JET # type: ignore
)
overlay = cv2.addWeighted(self.image, 0.6, attention_colormap, 0.4, 0)
# Draw the center of attention
center_coordinates = (int(center[1]), int(center[0]))
radius = 20
color = (0, 255, 0)
thickness = 2
cv2.circle(overlay, center_coordinates, radius, color, thickness)
self.attentions.append(overlay)
def reset(self) -> None:
self.attentions = []
def write(self) -> None:
if not self.attentions:
return
attention = cv2.vconcat(self.attentions)
self.parent._remember_file_name(self.destname)
cv2.imwrite(self.destname, attention)
self.attentions = []
class Debug:
def __init__(self, original_image: NDArray, filename: str, debug: bool):
self.filename = filename
self.original_image = original_image
filename = filename.replace("\\", "/")
self.dir_name = os.path.dirname(filename)
self.base_filename = os.path.join(self.dir_name, filename.split("/")[-1].split(".")[0])
self.debug = debug
self.colors = [
(0, 255, 0),
(0, 0, 255),
(255, 0, 0),
(255, 255, 0),
(0, 255, 255),
(255, 0, 255),
(255, 165, 0),
(255, 182, 193),
(128, 0, 128),
(64, 224, 208),
]
self.debug_output_counter = 0
self.written_files: list[str] = []
def clean_debug_files_from_previous_runs(self) -> None:
prefixes = (
self.base_filename + "_debug_",
self.base_filename + "_tesseract_input",
self.base_filename + "_staff-",
)
for file in glob.glob(self.base_filename + "*"):
if file.startswith(prefixes) and file not in self.written_files:
os.remove(file)
def _debug_file_name(self, suffix: str) -> str:
self.debug_output_counter += 1
return f"{self.base_filename}_debug_{str(self.debug_output_counter)}_{suffix}.png"
def write_threshold_image(self, suffix: str, image: NDArray) -> None:
if not self.debug:
return
filename = self._debug_file_name(suffix)
self._remember_file_name(filename)
cv2.imwrite(filename, 255 * image)
def _remember_file_name(self, filename: str) -> None:
self.written_files.append(filename)
def write_bounding_boxes(self, suffix: str, bounding_boxes: Sequence[DebugDrawable]) -> None:
if not self.debug:
return
img = self.original_image.copy()
for box in bounding_boxes:
box.draw_onto_image(img)
filename = self._debug_file_name(suffix)
self._remember_file_name(filename)
cv2.imwrite(filename, img)
def write_image(self, suffix: str, image: NDArray) -> None:
if not self.debug:
return
filename = self._debug_file_name(suffix)
self._remember_file_name(filename)
cv2.imwrite(filename, image)
def write_image_with_fixed_suffix(self, suffix: str, image: NDArray) -> None:
if not self.debug:
return
filename = self.base_filename + suffix
self._remember_file_name(filename)
cv2.imwrite(filename, image)
def write_all_bounding_boxes_alternating_colors(
self, suffix: str, *boxes: Sequence[DebugDrawable]
) -> None:
self.write_bounding_boxes_alternating_colors(suffix, list(chain.from_iterable(boxes)))
def write_bounding_boxes_alternating_colors(
self, suffix: str, bounding_boxes: Sequence[DebugDrawable]
) -> None:
if not self.debug:
return
self.write_teaser(self._debug_file_name(suffix), bounding_boxes)
def write_teaser(self, filename: str, bounding_boxes: Sequence[DebugDrawable]) -> None:
img = self.original_image.copy()
for i, box in enumerate(bounding_boxes):
color = self.colors[i % len(self.colors)]
box.draw_onto_image(img, color)
self._remember_file_name(filename)
cv2.imwrite(filename, img)
def write_model_input_image(self, suffix: str, staff_image: NDArray) -> str:
"""
These files aren't really debug files, but it's convenient to handle them here
so that they are cleaned up together with the debug files.
        Model input images are the images fed to the transformer or to the OCR step.
"""
filename = self.base_filename + suffix
if self.debug:
self._remember_file_name(filename)
cv2.imwrite(filename, staff_image)
return filename
def build_attention_debug(self, image: NDArray, suffix: str) -> AttentionDebug | None:
if not self.debug:
return None
filename = self.base_filename + suffix
self._remember_file_name(filename)
return AttentionDebug(filename, image, self)
| 5,937 | Python | .py | 134 | 34.261194 | 108 | 0.609943 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,860 | accidental_rules.py | liebharc_homr/homr/accidental_rules.py | from homr.circle_of_fifths import get_circle_of_fifth_notes
from homr.results import ResultChord, ResultClef, ResultNote, ResultPitch, ResultStaff
def _keep_accidentals_until_cancelled(staff: ResultStaff) -> None:
"""
Implements the rule that accidentals are kept until
cancelled by a natural sign or a new measure.
"""
for measure in staff.measures:
accidentals: dict[str, int | None] = {}
for symbol in measure.symbols:
if isinstance(symbol, ResultClef):
accidentals = {}
elif isinstance(symbol, ResultChord):
for note in symbol.notes:
_process_note(note, accidentals)
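# Worked example (not part of the original source): within one measure the
# written sequence C#4, C4, C4 is read as C#4, C#4, C#4, because a note
# without an explicit alter inherits the alter recorded earlier for the same
# name and octave; a new measure (the dict is rebuilt per measure) or a clef
# symbol clears the carried accidentals.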
def _process_note(note: ResultNote, accidentals: dict[str, int | None]) -> None:
pitch = note.pitch
note_name = pitch.name_and_octave()
if pitch.alter is not None:
accidentals[note_name] = pitch.alter
else:
alter_by_accidental = accidentals.get(note_name, None)
if alter_by_accidental is not None:
pitch.alter = alter_by_accidental
def _apply_key_signature(staff: ResultStaff) -> None:
"""
Applies the key signature to the notes.
"""
circle_of_fifth = 0
circle_of_fifth_notes = []
measure_number = 0
for measure in staff.measures:
measure_number += 1
for symbol in measure.symbols:
if isinstance(symbol, ResultClef):
circle_of_fifth = symbol.circle_of_fifth
circle_of_fifth_notes = get_circle_of_fifth_notes(circle_of_fifth)
elif isinstance(symbol, ResultChord):
for note in symbol.notes:
_apply_key_to_pitch(note.pitch, circle_of_fifth, circle_of_fifth_notes)
def _apply_key_to_pitch(
pitch: ResultPitch, circle_of_fifth: int, circle_of_fifth_notes: list[str]
) -> None:
if pitch.alter is None:
altered_by_key = pitch.step in circle_of_fifth_notes
if altered_by_key:
pitch.alter = 1 if circle_of_fifth >= 0 else -1
def maintain_accidentals(staffs: list[ResultStaff]) -> list[ResultStaff]:
"""
    How MusicXML works: in MusicXML the alter must be set for every note,
    independent of previous alters in the same measure or of the key.
So a sequence which is printed as
"Key D Major, Note C, Note Cb, Note C" must be encoded as
"Key D Major, Note C#, Note Cb, Note Cb" in MusicXML,
where the first "#" comes from the key of D Major.
"""
result: list[ResultStaff] = []
for staff in staffs:
_keep_accidentals_until_cancelled(staff)
_apply_key_signature(staff)
result.append(staff)
return result
| 2,739 | Python | .py | 63 | 34.650794 | 92 | 0.639412 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,861 | download_utils.py | liebharc_homr/homr/download_utils.py | import os
import tarfile
import zipfile
import requests
from homr.simple_logging import eprint
def download_file(url: str, filename: str) -> None:
response = requests.get(url, stream=True, timeout=5)
total = int(response.headers.get("content-length", 0))
totalMb = round(total / 1024 / 1024)
last_percent = -1
complete = 100
with open(filename, "wb") as f:
for chunk in response.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
progress = f.tell()
progressMb = round(progress / 1024 / 1024)
if total > 0:
progressPercent = complete * progress // total
if progressPercent != last_percent:
eprint(
f"\rDownloaded {progressMb} of {totalMb} MB ({progressPercent}%)",
end="",
)
last_percent = progressPercent
else:
eprint(f"\rDownloaded {progressMb} MB", end="")
if total > 0 and last_percent != complete:
eprint(f"\rDownloaded {totalMb} of {totalMb} MB (100%)")
else:
eprint() # Add newline after download progress
def unzip_file(filename: str, output_folder: str) -> None:
with zipfile.ZipFile(filename, "r") as zip_ref:
for member in zip_ref.namelist():
# Ensure file path is safe
if os.path.isabs(member) or ".." in member:
eprint(f"Skipping potentially unsafe file {member}")
continue
# Handle directories
if member.endswith("/"):
os.makedirs(os.path.join(output_folder, member), exist_ok=True)
continue
# Extract file
source = zip_ref.open(member)
target = open(os.path.join(output_folder, member), "wb")
with source, target:
while True:
chunk = source.read(1024)
if not chunk:
break
target.write(chunk)
def untar_file(filename: str, output_folder: str) -> None:
with tarfile.open(filename, "r:gz") as tar:
for member in tar.getmembers():
# Ensure file path is safe
if os.path.isabs(member.name) or ".." in member.name:
eprint(f"Skipping potentially unsafe file {member.name}")
continue
# Handle directories
if member.type == tarfile.DIRTYPE:
os.makedirs(os.path.join(output_folder, member.name), exist_ok=True)
continue
# Extract file
source = tar.extractfile(member)
if source is None:
continue
target = open(os.path.join(output_folder, member.name), "wb")
with source, target:
while True:
chunk = source.read(1024)
if not chunk:
break
target.write(chunk)
| 3,126 | Python | .py | 73 | 29.178082 | 94 | 0.535879 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,862 | accidental_detection.py | liebharc_homr/homr/accidental_detection.py | from homr import constants
from homr.bounding_boxes import RotatedBoundingBox
from homr.model import Accidental, Staff
def add_accidentals_to_staffs(
staffs: list[Staff], accidentals: list[RotatedBoundingBox]
) -> list[Accidental]:
result = []
for staff in staffs:
for accidental in accidentals:
if not staff.is_on_staff_zone(accidental):
continue
point = staff.get_at(accidental.center[0])
if point is None:
continue
min_width_or_height = constants.minimum_accidental_width_or_height(
staff.average_unit_size
)
max_width_or_height = constants.maximum_accidental_width_or_height(
staff.average_unit_size
)
if (
accidental.size[0] < min_width_or_height
or accidental.size[0] > max_width_or_height
or accidental.size[1] < min_width_or_height
or accidental.size[1] > max_width_or_height
):
continue
position = point.find_position_in_unit_sizes(accidental)
accidental_bbox = accidental.to_bounding_box()
clef_symbol = Accidental(accidental_bbox, position)
staff.add_symbol(clef_symbol)
result.append(clef_symbol)
return result
| 1,364 | Python | .py | 33 | 30 | 79 | 0.606335 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,863 | model.py | liebharc_homr/homr/model.py | from abc import abstractmethod
from collections.abc import Callable
from enum import Enum
import cv2
import numpy as np
from typing_extensions import Self
from homr import constants
from homr.bounding_boxes import (
AngledBoundingBox,
BoundingBox,
BoundingEllipse,
DebugDrawable,
RotatedBoundingBox,
)
from homr.circle_of_fifths import get_circle_of_fifth_notes
from homr.results import ClefType, ResultPitch
from homr.type_definitions import NDArray
class InputPredictions:
def __init__(
self,
original: NDArray,
preprocessed: NDArray,
notehead: NDArray,
symbols: NDArray,
staff: NDArray,
clefs_keys: NDArray,
stems_rest: NDArray,
) -> None:
self.original = original
self.preprocessed = preprocessed
self.notehead = notehead
self.symbols = symbols
self.staff = staff
self.stems_rest = stems_rest
self.clefs_keys = clefs_keys
class SymbolOnStaff(DebugDrawable):
def __init__(self, center: tuple[float, float]) -> None:
self.center = center
@abstractmethod
def copy(self) -> Self:
pass
def transform_coordinates(
self, transformation: Callable[[tuple[float, float]], tuple[float, float]]
) -> Self:
copy = self.copy()
copy.center = transformation(self.center)
return copy
def calc_distance_to(self, point: tuple[float, float]) -> float:
return float(np.linalg.norm(np.array(self.center) - np.array(point)))
def calc_x_distance_to(self, point: tuple[float, float]) -> float:
return abs(self.center[0] - point[0])
class Accidental(SymbolOnStaff):
def __init__(self, box: BoundingBox, position: int) -> None:
super().__init__(box.center)
self.box = box
self.position = position
def draw_onto_image(self, img: NDArray, color: tuple[int, int, int] = (255, 0, 0)) -> None:
self.box.draw_onto_image(img, color)
cv2.putText(
img,
"accidental-" + str(self.position),
(int(self.box.box[0]), int(self.box.box[1])),
cv2.FONT_HERSHEY_SIMPLEX,
1,
color,
2,
cv2.LINE_AA,
)
def __str__(self) -> str:
return "Accidental(" + str(self.center) + ")"
def __repr__(self) -> str:
return str(self)
def copy(self) -> "Accidental":
return Accidental(self.box, self.position)
class Rest(SymbolOnStaff):
def __init__(self, box: BoundingBox) -> None:
super().__init__(box.center)
self.box = box
self.has_dot = False
def draw_onto_image(self, img: NDArray, color: tuple[int, int, int] = (255, 0, 0)) -> None:
self.box.draw_onto_image(img, color)
cv2.putText(
img,
"rest",
(int(self.box.box[0]), int(self.box.box[1])),
cv2.FONT_HERSHEY_SIMPLEX,
1,
color,
2,
cv2.LINE_AA,
)
def __str__(self) -> str:
return "Rest(" + str(self.center) + ")"
def __repr__(self) -> str:
return str(self)
def copy(self) -> "Rest":
return Rest(self.box)
class StemDirection(Enum):
UP = 1
DOWN = 2
class NoteHeadType(Enum):
HOLLOW = 1
SOLID = 2
def __str__(self) -> str:
if self == NoteHeadType.HOLLOW:
return "O"
elif self == NoteHeadType.SOLID:
return "*"
else:
raise Exception("Unknown NoteHeadType")
note_names = ["C", "D", "E", "F", "G", "A", "B"]
class Pitch:
def __init__(self, step: str, alter: int | None, octave: int):
self.step = step
self.alter: int | None
if alter is not None:
self.alter = int(alter)
else:
self.alter = None
self.octave = int(octave)
def move_by_position(self, position: int, circle_of_fifth: int) -> "Pitch":
# Find the current position of the note in the scale
current_position = note_names.index(self.step)
# Calculate the new position
new_position = (current_position + position) % len(note_names)
# Calculate the new octave
new_octave = self.octave + ((current_position + position) // len(note_names))
# Get the new step
new_step = note_names[new_position]
alter = None
if new_step in get_circle_of_fifth_notes(circle_of_fifth):
if circle_of_fifth < 0:
alter = -1
else:
alter = 1
return Pitch(new_step, alter, new_octave)
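    # Illustrative behavior (not part of the original source):
    #   Pitch("C", None, 4).move_by_position(2, 0)  # -> E4 (two diatonic steps)
    #   Pitch("B", None, 4).move_by_position(1, 0)  # -> C5 (the octave wraps)
    #   Pitch("E", None, 4).move_by_position(1, 2)  # -> F4 with alter=1,
    #       assuming get_circle_of_fifth_notes(2) yields ["F", "C"]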
def to_result(self) -> ResultPitch:
return ResultPitch(self.step, self.octave, self.alter)
def copy(self) -> "Pitch":
return Pitch(self.step, self.alter, self.octave)
class Note(SymbolOnStaff):
def __init__(
self,
box: BoundingEllipse,
position: int,
stem: RotatedBoundingBox | None,
stem_direction: StemDirection | None,
):
super().__init__(box.center)
self.box = box
self.position = position
self.has_dot = False
self.beam_count = 0
self.stem = stem
self.clef_type = ClefType.treble()
self.circle_of_fifth = 0
self.accidental: Accidental | None = None
self.stem_direction = stem_direction
self.beams: list[RotatedBoundingBox] = []
self.flags: list[RotatedBoundingBox] = []
def draw_onto_image(self, img: NDArray, color: tuple[int, int, int] = (255, 0, 0)) -> None:
self.box.draw_onto_image(img, color)
dot_string = "." if self.has_dot else ""
cv2.putText(
img,
"note" + dot_string + str(self.position),
(int(self.box.center[0]), int(self.box.center[1])),
cv2.FONT_HERSHEY_SIMPLEX,
1,
color,
2,
cv2.LINE_AA,
)
if self.stem is not None:
self.stem.draw_onto_image(img, color)
for beam in self.beams:
beam.draw_onto_image(img, color)
for flag in self.flags:
flag.draw_onto_image(img, color)
def get_pitch(
self, clef_type: ClefType | None = None, circle_of_fifth: int | None = None
) -> Pitch:
clef_type = self.clef_type if clef_type is None else clef_type
circle_of_fifth = self.circle_of_fifth if circle_of_fifth is None else circle_of_fifth
reference = clef_type.get_reference_pitch()
reference_pitch = Pitch(reference.step, reference.alter, reference.octave)
# Position + 1 as the model uses a higher reference point on the staff
return reference_pitch.move_by_position(self.position + 1, circle_of_fifth)
def to_tr_omr_note(self, clef_type: ClefType) -> str:
pitch = self.get_pitch(clef_type=clef_type).to_result()
# We have no information about the duration here and default to quarter
return "note-" + str(pitch) + "_quarter"
def __str__(self) -> str:
return "Note(" + str(self.center) + ", " + str(self.position) + ")"
def __repr__(self) -> str:
return str(self)
def copy(self) -> "Note":
return Note(self.box, self.position, self.stem, self.stem_direction)
class NoteGroup(SymbolOnStaff):
def __init__(self, notes: list[Note]) -> None:
average_center = np.mean([note.center for note in notes], axis=0)
super().__init__(average_center)
# sort notes by pitch, highest position first
self.notes = sorted(notes, key=lambda note: note.position, reverse=True)
def draw_onto_image(self, img: NDArray, color: tuple[int, int, int] = (255, 0, 0)) -> None:
for note in self.notes:
note.draw_onto_image(img, color)
def to_tr_omr_note(self, clef_type: ClefType) -> str:
return "|".join([note.to_tr_omr_note(clef_type) for note in self.notes])
def __str__(self) -> str:
return "NoteGroup(" + str.join(",", [str(note) for note in self.notes]) + ")"
def __repr__(self) -> str:
return str(self)
def copy(self) -> "NoteGroup":
return NoteGroup([note.copy() for note in self.notes])
def transform_coordinates(
self, transformation: Callable[[tuple[float, float]], tuple[float, float]]
) -> "NoteGroup":
return NoteGroup([note.transform_coordinates(transformation) for note in self.notes])
class BarLine(SymbolOnStaff):
def __init__(self, box: RotatedBoundingBox):
super().__init__(box.center)
self.box = box
def draw_onto_image(self, img: NDArray, color: tuple[int, int, int] = (255, 0, 0)) -> None:
self.box.draw_onto_image(img, color)
def __str__(self) -> str:
return "BarLine(" + str(self.center) + ")"
def __repr__(self) -> str:
return str(self)
def copy(self) -> "BarLine":
return BarLine(self.box)
class Clef(SymbolOnStaff):
def __init__(self, box: BoundingBox):
super().__init__(box.center)
self.box = box
self.accidentals: list[Accidental] = []
def draw_onto_image(self, img: NDArray, color: tuple[int, int, int] = (255, 0, 0)) -> None:
self.box.draw_onto_image(img, color)
cv2.putText(
img,
"clef",
(self.box.box[0], self.box.box[1]),
cv2.FONT_HERSHEY_SIMPLEX,
1,
color,
2,
cv2.LINE_AA,
)
def __str__(self) -> str:
return "Clef(" + str(self.center) + ")"
def __repr__(self) -> str:
return str(self)
def copy(self) -> "Clef":
return Clef(self.box)
class StaffPoint:
def __init__(self, x: float, y: list[float], angle: float):
if len(y) != constants.number_of_lines_on_a_staff:
raise Exception("A staff must consist of exactly 5 lines")
self.x = x
self.y = y
self.angle = angle
self.average_unit_size = np.mean(np.diff(y))
def find_position_in_unit_sizes(self, box: AngledBoundingBox) -> int:
center = box.center
idx_of_closest_y = int(np.argmin(np.abs([y_value - center[1] for y_value in self.y])))
distance = self.y[idx_of_closest_y] - center[1]
distance_in_unit_sizes = round(2 * distance / self.average_unit_size)
position = (
2 * (constants.number_of_lines_on_a_staff - idx_of_closest_y)
+ distance_in_unit_sizes
- 1
)
return position # type: ignore
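    # Illustrative mapping (not part of the original source): with the five
    # line y-coordinates ordered top to bottom, a box centered on the bottom
    # line yields position 1, the middle line 5 and the top line 9; each half
    # unit upwards adds one, so ledger-line positions fall outside 1..9.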
def transform_coordinates(
self, transformation: Callable[[tuple[float, float]], tuple[float, float]]
) -> "StaffPoint":
xy = [transformation((self.x, y_value)) for y_value in self.y]
average_x = np.mean([x for x, _ in xy])
return StaffPoint(float(average_x), [y for _, y in xy], self.angle)
def to_bounding_box(self) -> BoundingBox:
return BoundingBox(
[int(self.x), int(self.y[0]), int(self.x), int(self.y[-1])], np.array([]), -2
)
class Staff(DebugDrawable):
def __init__(self, grid: list[StaffPoint]):
self.grid = grid
self.min_x = grid[0].x
self.max_x = grid[-1].x
self.min_y = min([min(p.y) for p in grid])
self.max_y = max([max(p.y) for p in grid])
self.average_unit_size = np.median([p.average_unit_size for p in grid])
self.ledger_lines: list[RotatedBoundingBox] = []
self.symbols: list[SymbolOnStaff] = []
self._y_tolerance = constants.max_number_of_ledger_lines * self.average_unit_size
def is_on_staff_zone(self, item: AngledBoundingBox) -> bool:
point = self.get_at(item.center[0])
if point is None:
return False
if (
item.center[1] > point.y[-1] + self._y_tolerance
or item.center[1] < point.y[0] - self._y_tolerance
):
return False
return True
def add_symbol(self, symbol: SymbolOnStaff) -> None:
self.symbols.append(symbol)
def add_symbols(self, symbols: list[SymbolOnStaff]) -> None:
self.symbols.extend(symbols)
def get_measures(self) -> list[list[Note | NoteGroup]]:
measures: list[list[Note | NoteGroup]] = []
current_measure: list[Note | NoteGroup] = []
symbols_on_measure = self.get_notes() + self.get_note_groups() + self.get_bar_lines()
for symbol in sorted(symbols_on_measure, key=lambda s: s.center[0]):
if isinstance(symbol, BarLine):
measures.append(current_measure)
current_measure = []
else:
current_measure.append(symbol)
# Add the last measure
measures.append(current_measure)
# Remove empty measures
measures = [measure for measure in measures if len(measure) > 0]
return measures
def get_at(self, x: float) -> StaffPoint | None:
closest_point = min(self.grid, key=lambda p: abs(p.x - x))
if abs(closest_point.x - x) > constants.staff_position_tolerance:
return None
return closest_point
def y_distance_to(self, point: tuple[float, float]) -> float:
staff_point = self.get_at(point[0])
if staff_point is None:
return 1e10 # Something large to mimic infinity
return min([abs(y - point[1]) for y in staff_point.y])
def draw_onto_image(self, img: NDArray, color: tuple[int, int, int] = (255, 0, 0)) -> None:
for i in range(constants.number_of_lines_on_a_staff):
for j in range(len(self.grid) - 1):
p1 = self.grid[j]
p2 = self.grid[j + 1]
cv2.line(
img, (int(p1.x), int(p1.y[i])), (int(p2.x), int(p2.y[i])), color, thickness=2
)
def get_bar_lines(self) -> list[BarLine]:
result = []
for symbol in self.symbols:
if isinstance(symbol, BarLine):
result.append(symbol)
return result
def get_clefs(self) -> list[Clef]:
result = []
for symbol in self.symbols:
if isinstance(symbol, Clef):
result.append(symbol)
return result
def get_notes(self) -> list[Note]:
result = []
for symbol in self.symbols:
if isinstance(symbol, Note):
result.append(symbol)
return result
def get_accidentals(self) -> list[Accidental]:
result = []
for symbol in self.symbols:
if isinstance(symbol, Accidental):
result.append(symbol)
return result
def get_note_groups(self) -> list[NoteGroup]:
result = []
for symbol in self.symbols:
if isinstance(symbol, NoteGroup):
result.append(symbol)
return result
def get_notes_and_groups(self) -> list[Note | NoteGroup]:
result = []
for symbol in self.symbols:
if isinstance(symbol, Note | NoteGroup):
result.append(symbol)
return result
def get_all_except_notes(self) -> list[SymbolOnStaff]:
result = []
for symbol in self.symbols:
if not isinstance(symbol, Note):
result.append(symbol)
return result
def __str__(self) -> str:
return "Staff(" + str.join(", ", [str(s) for s in self.symbols]) + ")"
def __repr__(self) -> str:
return str(self)
def copy(self) -> "Staff":
return Staff(self.grid)
def transform_coordinates(
self, transformation: Callable[[tuple[float, float]], tuple[float, float]]
) -> "Staff":
copy = Staff([point.transform_coordinates(transformation) for point in self.grid])
copy.symbols = [symbol.transform_coordinates(transformation) for symbol in self.symbols]
return copy
class MultiStaff(DebugDrawable):
"""
A grand staff or a staff with multiple voices.
"""
def __init__(self, staffs: list[Staff], connections: list[RotatedBoundingBox]) -> None:
self.staffs = sorted(staffs, key=lambda s: s.min_y)
self.connections = connections
def merge(self, other: "MultiStaff") -> "MultiStaff":
unique_staffs = []
unique_connections = []
for staff in self.staffs + other.staffs:
if staff not in unique_staffs:
unique_staffs.append(staff)
for connection in self.connections + other.connections:
if connection not in unique_connections:
unique_connections.append(connection)
return MultiStaff(unique_staffs, unique_connections)
def break_apart(self) -> list["MultiStaff"]:
return [MultiStaff([staff], []) for staff in self.staffs]
def draw_onto_image(self, img: NDArray, color: tuple[int, int, int] = (255, 0, 0)) -> None:
for staff in self.staffs:
staff.draw_onto_image(img, color)
for connection in self.connections:
connection.draw_onto_image(img, color)
| 17,578 | Python | .py | 419 | 31.842482 | 98 | 0.578817 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,864 | color_adjust.py | liebharc_homr/homr/color_adjust.py | import math
import cv2
import numpy as np
import scipy # type: ignore
from homr.simple_logging import eprint
from homr.type_definitions import NDArray
def get_dominant_color(
gray_scale: NDArray, color_range: range, default: int | None = None
) -> int | None:
if gray_scale.dtype != np.uint8:
raise Exception("Wrong image dtype")
# Create a mask for values in the range [min_val, max_val]
mask = (gray_scale >= color_range.start) & (gray_scale <= color_range.stop)
# Apply the mask to the grayscale image
masked_gray_scale = gray_scale[mask]
if masked_gray_scale.size == 0:
return default
bins = np.bincount(masked_gray_scale.flatten())
    # scipy.ndimage.measurements was removed in SciPy 1.10; the function now
    # lives directly in scipy.ndimage.
    center_of_mass = scipy.ndimage.center_of_mass(bins)[0]
return int(center_of_mass)
def apply_clahe(channel: NDArray) -> NDArray:
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
return clahe.apply(channel)
def remove_background_from_channel(channel: NDArray, block_size: int) -> tuple[NDArray, NDArray]:
"""
Divides the image into blocks of size block_size and calculates
the dominant color of each block. The dominant color is then
used to create a background image, which is then used to divide the
original image. The result is an image with a more uniform background.
"""
x_range = range(0, channel.shape[0], block_size)
y_range = range(0, channel.shape[1], block_size)
background_pixels = np.zeros(
[math.ceil(x_range.stop / block_size), math.ceil(y_range.stop / block_size)], dtype=np.uint8
)
color_range = range(150, 254)
background = get_dominant_color(channel, color_range)
for i, row in enumerate(x_range):
for j, col in enumerate(y_range):
idx = (row, col)
block_idx = get_block_index(channel.shape, idx, block_size)
background_pixels[i, j] = get_dominant_color(
channel[block_idx], color_range, background
)
background_blurred = cv2.blur(background_pixels, (3, 3))
color_white = 255
valid_background = background_blurred < color_white # type: ignore
max_background = int(np.max(background_blurred[valid_background]))
background_blurred[valid_background] += color_white - max_background
result_background = cv2.resize(
background_blurred, (channel.shape[1], channel.shape[0]), interpolation=cv2.INTER_LINEAR
)
division = cv2.divide(channel, result_background, scale=color_white)
return division, result_background
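# Minimal usage sketch (not part of the original source): flatten the lighting
# of a grayscale page before thresholding or segmentation.
#
#   gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#   flattened, background = remove_background_from_channel(gray, block_size=40)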
def get_block_index(
image_shape: tuple[int, ...], yx: tuple[int, int], block_size: int
) -> tuple[NDArray, ...]:
"""
Creates a grid of indices for a block of pixels around a given pixel.
"""
y = np.arange(max(0, yx[0] - block_size), min(image_shape[0], yx[0] + block_size))
x = np.arange(max(0, yx[1] - block_size), min(image_shape[1], yx[1] + block_size))
return np.ix_(y, x)
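# np.ix_ builds an open mesh from the two 1-D index ranges, so indexing a 2-D
# array with the returned tuple selects the rectangular block around the given
# point. Tiny demonstration (not part of the original source):
#
#   a = np.arange(16).reshape(4, 4)
#   a[get_block_index(a.shape, (1, 1), 1)]  # -> the 2x2 block a[0:2, 0:2]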
def color_adjust(image: NDArray, block_size: int) -> tuple[NDArray, NDArray]:
"""
Reduce the effect of uneven lighting on the image by dividing the image by its interpolated
background.
"""
try:
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image, background = remove_background_from_channel(image, block_size)
return cv2.cvtColor(apply_clahe(image), cv2.COLOR_GRAY2BGR), background
except Exception as e:
eprint(e)
return image, image
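# Minimal usage sketch (not part of the original source); the pipeline calls
# this with a block size of 40 before running segmentation:
#
#   image = cv2.imread("page.png")
#   adjusted, background = color_adjust(image, 40)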
| 3,544 | Python | .py | 75 | 40.373333 | 101 | 0.67016 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,865 | main.py | liebharc_homr/homr/main.py | import argparse
import glob
import os
import sys
import cv2
import numpy as np
from homr import color_adjust, download_utils
from homr.accidental_detection import add_accidentals_to_staffs
from homr.accidental_rules import maintain_accidentals
from homr.autocrop import autocrop
from homr.bar_line_detection import (
add_bar_lines_to_staffs,
detect_bar_lines,
prepare_bar_line_image,
)
from homr.bounding_boxes import (
BoundingEllipse,
RotatedBoundingBox,
create_bounding_ellipses,
create_rotated_bounding_boxes,
)
from homr.brace_dot_detection import (
find_braces_brackets_and_grand_staff_lines,
prepare_brace_dot_image,
)
from homr.debug import Debug
from homr.model import InputPredictions
from homr.noise_filtering import filter_predictions
from homr.note_detection import add_notes_to_staffs, combine_noteheads_with_stems
from homr.resize import resize_image
from homr.rest_detection import add_rests_to_staffs
from homr.segmentation.config import segnet_path, unet_path
from homr.segmentation.segmentation import segmentation
from homr.simple_logging import eprint
from homr.staff_detection import break_wide_fragments, detect_staff, make_lines_stronger
from homr.staff_parsing import parse_staffs
from homr.title_detection import detect_title
from homr.transformer.configs import default_config
from homr.type_definitions import NDArray
from homr.xml_generator import XmlGeneratorArguments, generate_xml
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
class PredictedSymbols:
def __init__(
self,
noteheads: list[BoundingEllipse],
staff_fragments: list[RotatedBoundingBox],
clefs_keys: list[RotatedBoundingBox],
accidentals: list[RotatedBoundingBox],
stems_rest: list[RotatedBoundingBox],
bar_lines: list[RotatedBoundingBox],
) -> None:
self.noteheads = noteheads
self.staff_fragments = staff_fragments
self.clefs_keys = clefs_keys
self.accidentals = accidentals
self.stems_rest = stems_rest
self.bar_lines = bar_lines
def get_predictions(
original: NDArray, preprocessed: NDArray, img_path: str, save_cache: bool
) -> InputPredictions:
result = segmentation(preprocessed, img_path, use_cache=save_cache)
original_image = cv2.resize(original, (result.staff.shape[1], result.staff.shape[0]))
preprocessed_image = cv2.resize(preprocessed, (result.staff.shape[1], result.staff.shape[0]))
return InputPredictions(
original=original_image,
preprocessed=preprocessed_image,
notehead=result.notehead.astype(np.uint8),
symbols=result.symbols.astype(np.uint8),
staff=result.staff.astype(np.uint8),
clefs_keys=result.clefs_keys.astype(np.uint8),
stems_rest=result.stems_rests.astype(np.uint8),
)
def replace_extension(path: str, new_extension: str) -> str:
return (
path.replace(".png", new_extension)
.replace(".jpg", new_extension)
.replace(".jpeg", new_extension)
)
def load_and_preprocess_predictions(
image_path: str, enable_debug: bool, enable_cache: bool
) -> tuple[InputPredictions, Debug]:
image = cv2.imread(image_path)
image = autocrop(image)
image = resize_image(image)
preprocessed, _background = color_adjust.color_adjust(image, 40)
predictions = get_predictions(image, preprocessed, image_path, enable_cache)
debug = Debug(predictions.original, image_path, enable_debug)
debug.write_image("color_adjust", preprocessed)
predictions = filter_predictions(predictions, debug)
predictions.staff = make_lines_stronger(predictions.staff, (1, 2))
debug.write_threshold_image("staff", predictions.staff)
debug.write_threshold_image("symbols", predictions.symbols)
debug.write_threshold_image("stems_rest", predictions.stems_rest)
debug.write_threshold_image("notehead", predictions.notehead)
debug.write_threshold_image("clefs_keys", predictions.clefs_keys)
return predictions, debug
def predict_symbols(debug: Debug, predictions: InputPredictions) -> PredictedSymbols:
eprint("Creating bounds for noteheads")
noteheads = create_bounding_ellipses(predictions.notehead, min_size=(4, 4))
eprint("Creating bounds for staff_fragments")
staff_fragments = create_rotated_bounding_boxes(
predictions.staff, skip_merging=True, min_size=(5, 1), max_size=(10000, 100)
)
eprint("Creating bounds for clefs_keys")
clefs_keys = create_rotated_bounding_boxes(
predictions.clefs_keys, min_size=(20, 40), max_size=(1000, 1000)
)
eprint("Creating bounds for accidentals")
accidentals = create_rotated_bounding_boxes(
predictions.clefs_keys, min_size=(5, 5), max_size=(100, 100)
)
eprint("Creating bounds for stems_rest")
stems_rest = create_rotated_bounding_boxes(predictions.stems_rest)
eprint("Creating bounds for bar_lines")
bar_line_img = prepare_bar_line_image(predictions.stems_rest)
debug.write_threshold_image("bar_line_img", bar_line_img)
bar_lines = create_rotated_bounding_boxes(bar_line_img, skip_merging=True, min_size=(1, 5))
return PredictedSymbols(
noteheads, staff_fragments, clefs_keys, accidentals, stems_rest, bar_lines
)
def process_image( # noqa: PLR0915
image_path: str,
enable_debug: bool,
enable_cache: bool,
xml_generator_args: XmlGeneratorArguments,
) -> tuple[str, str, str]:
eprint("Processing " + image_path)
predictions, debug = load_and_preprocess_predictions(image_path, enable_debug, enable_cache)
xml_file = replace_extension(image_path, ".musicxml")
try:
eprint("Loaded segmentation")
symbols = predict_symbols(debug, predictions)
eprint("Predicted symbols")
symbols.staff_fragments = break_wide_fragments(symbols.staff_fragments)
debug.write_bounding_boxes("staff_fragments", symbols.staff_fragments)
eprint("Found " + str(len(symbols.staff_fragments)) + " staff line fragments")
noteheads_with_stems, likely_bar_or_rests_lines = combine_noteheads_with_stems(
symbols.noteheads, symbols.stems_rest
)
debug.write_bounding_boxes_alternating_colors("notehead_with_stems", noteheads_with_stems)
eprint("Found " + str(len(noteheads_with_stems)) + " noteheads")
if len(noteheads_with_stems) == 0:
raise Exception("No noteheads found")
average_note_head_height = float(
np.median([notehead.notehead.size[1] for notehead in noteheads_with_stems])
)
eprint("Average note head height: " + str(average_note_head_height))
all_noteheads = [notehead.notehead for notehead in noteheads_with_stems]
all_stems = [note.stem for note in noteheads_with_stems if note.stem is not None]
bar_lines_or_rests = [
line
for line in symbols.bar_lines
if not line.is_overlapping_with_any(all_noteheads)
and not line.is_overlapping_with_any(all_stems)
]
bar_line_boxes = detect_bar_lines(bar_lines_or_rests, average_note_head_height)
debug.write_bounding_boxes_alternating_colors("bar_lines", bar_line_boxes)
eprint("Found " + str(len(bar_line_boxes)) + " bar lines")
debug.write_bounding_boxes(
"anchor_input", symbols.staff_fragments + bar_line_boxes + symbols.clefs_keys
)
staffs = detect_staff(
debug, predictions.staff, symbols.staff_fragments, symbols.clefs_keys, bar_line_boxes
)
if len(staffs) == 0:
raise Exception("No staffs found")
debug.write_bounding_boxes_alternating_colors("staffs", staffs)
global_unit_size = np.mean([staff.average_unit_size for staff in staffs])
bar_lines_found = add_bar_lines_to_staffs(staffs, bar_line_boxes)
eprint("Found " + str(len(bar_lines_found)) + " bar lines")
possible_rests = [
rest for rest in bar_lines_or_rests if not rest.is_overlapping_with_any(bar_line_boxes)
]
rests = add_rests_to_staffs(staffs, possible_rests)
eprint("Found", len(rests), "rests")
all_classified = predictions.notehead + predictions.clefs_keys + predictions.stems_rest
brace_dot_img = prepare_brace_dot_image(
predictions.symbols, predictions.staff, all_classified, global_unit_size
)
debug.write_threshold_image("brace_dot", brace_dot_img)
brace_dot = create_rotated_bounding_boxes(
brace_dot_img, skip_merging=True, max_size=(100, -1)
)
notes = add_notes_to_staffs(
staffs, noteheads_with_stems, predictions.symbols, predictions.notehead
)
accidentals = add_accidentals_to_staffs(staffs, symbols.accidentals)
eprint("Found", len(accidentals), "accidentals")
multi_staffs = find_braces_brackets_and_grand_staff_lines(debug, staffs, brace_dot)
eprint(
"Found",
len(multi_staffs),
"connected staffs (after merging grand staffs, multiple voices): ",
[len(staff.staffs) for staff in multi_staffs],
)
debug.write_all_bounding_boxes_alternating_colors(
"notes", multi_staffs, notes, rests, accidentals
)
title = detect_title(debug, staffs[0])
eprint("Found title: " + title)
result_staffs = parse_staffs(debug, multi_staffs, predictions)
result_staffs = maintain_accidentals(result_staffs)
eprint("Writing XML")
xml = generate_xml(xml_generator_args, result_staffs, title)
xml.write(xml_file)
eprint(
"Finished parsing "
+ str(len(result_staffs))
+ " voices over "
+ str(sum(staff.number_of_new_lines() for staff in result_staffs))
+ " staves"
)
teaser_file = replace_extension(image_path, "_teaser.png")
debug.write_teaser(teaser_file, staffs)
debug.clean_debug_files_from_previous_runs()
eprint("Result was written to", xml_file)
return xml_file, title, teaser_file
except:
if os.path.exists(xml_file):
os.remove(xml_file)
raise
finally:
debug.clean_debug_files_from_previous_runs()
def get_all_image_files_in_folder(folder: str) -> list[str]:
image_files = []
for ext in ["png", "jpg", "jpeg"]:
image_files.extend(glob.glob(os.path.join(folder, "**", f"*.{ext}"), recursive=True))
without_teasers = [
img
for img in image_files
if "_teaser" not in img
and "_debug" not in img
and "_staff" not in img
and "_tesseract" not in img
]
return sorted(without_teasers)
def download_weights() -> None:
base_url = "https://github.com/liebharc/homr/releases/download/checkpoints/"
models = [segnet_path, unet_path, default_config.filepaths.checkpoint]
missing_models = [model for model in models if not os.path.exists(model)]
if len(missing_models) == 0:
return
eprint("Downloading", len(missing_models), "models - this is only required once")
    for model in missing_models:
        # missing_models already contains only models that are absent on disk,
        # so no further existence check is needed here.
        base_name = os.path.basename(model).split(".")[0]
        eprint(f"Downloading {base_name}")
        try:
            zip_name = base_name + ".zip"
            download_url = base_url + zip_name
            downloaded_zip = os.path.join(os.path.dirname(model), zip_name)
            download_utils.download_file(download_url, downloaded_zip)
            destination_dir = os.path.dirname(model)
            download_utils.unzip_file(downloaded_zip, destination_dir)
        finally:
            if os.path.exists(downloaded_zip):
                os.remove(downloaded_zip)
def main() -> None:
parser = argparse.ArgumentParser(
prog="homer", description="An optical music recognition (OMR) system"
)
parser.add_argument("image", type=str, nargs="?", help="Path to the image to process")
parser.add_argument(
"--init",
action="store_true",
help="Downloads the models if they are missing and then exits. "
+ "You don't have to call init before processing images, "
+ "it's only useful if you want to prepare for example a Docker image.",
)
parser.add_argument("--debug", action="store_true", help="Enable debug output")
parser.add_argument(
"--cache", action="store_true", help="Read an existing cache file or create a new one"
)
parser.add_argument(
"--output-large-page",
action="store_true",
help="Adds instructions to the musicxml so that it gets rendered on larger pages",
)
parser.add_argument(
"--output-metronome", type=int, help="Adds a metronome to the musicxml with the given bpm"
)
parser.add_argument(
"--output-tempo", type=int, help="Adds a tempo to the musicxml with the given bpm"
)
args = parser.parse_args()
download_weights()
if args.init:
eprint("Init finished")
return
xml_generator_args = XmlGeneratorArguments(
args.output_large_page, args.output_metronome, args.output_tempo
)
if not args.image:
eprint("No image provided")
parser.print_help()
sys.exit(1)
elif os.path.isfile(args.image):
process_image(args.image, args.debug, args.cache, xml_generator_args)
elif os.path.isdir(args.image):
image_files = get_all_image_files_in_folder(args.image)
eprint("Processing", len(image_files), "files:", image_files)
error_files = []
for image_file in image_files:
eprint("=========================================")
try:
process_image(image_file, args.debug, args.cache, xml_generator_args)
eprint("Finished", image_file)
except Exception as e:
eprint(f"An error occurred while processing {image_file}: {e}")
error_files.append(image_file)
if len(error_files) > 0:
eprint("Errors occurred while processing the following files:", error_files)
else:
raise ValueError(f"{args.image} is not a valid file or directory")
if __name__ == "__main__":
main()
| 14,403 | Python | .py | 316 | 38.075949 | 99 | 0.67277 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,866 | note_detection.py | liebharc_homr/homr/note_detection.py | import cv2.typing as cvt
import numpy as np
from homr import constants
from homr.bounding_boxes import BoundingEllipse, DebugDrawable, RotatedBoundingBox
from homr.model import Note, NoteGroup, Staff, StemDirection, SymbolOnStaff
from homr.simple_logging import eprint
from homr.type_definitions import NDArray
class NoteheadWithStem(DebugDrawable):
def __init__(
self,
notehead: BoundingEllipse,
stem: RotatedBoundingBox | None,
stem_direction: StemDirection | None = None,
):
self.notehead = notehead
self.stem = stem
self.stem_direction = stem_direction
def draw_onto_image(self, img: NDArray, color: tuple[int, int, int] = (255, 0, 0)) -> None:
self.notehead.draw_onto_image(img, color)
if self.stem is not None:
self.stem.draw_onto_image(img, color)
def adjust_bbox(bbox: cvt.Rect, noteheads: NDArray) -> cvt.Rect:
region = noteheads[bbox[1] : bbox[3], bbox[0] : bbox[2]]
ys, _ = np.where(region > 0)
if len(ys) == 0:
# Invalid note. Will be eliminated with zero height.
return bbox
top = np.min(ys) + bbox[1] - 1
bottom = np.max(ys) + bbox[1] + 1
return (bbox[0], int(top), bbox[2], int(bottom))
def get_center(bbox: cvt.Rect) -> tuple[int, int]:
cen_y = int(round((bbox[1] + bbox[3]) / 2))
cen_x = int(round((bbox[0] + bbox[2]) / 2))
return cen_x, cen_y
def check_bbox_size(bbox: cvt.Rect, noteheads: NDArray, unit_size: float) -> list[cvt.Rect]:
w = bbox[2] - bbox[0]
h = bbox[3] - bbox[1]
cen_x, _ = get_center(bbox)
note_w = constants.NOTEHEAD_SIZE_RATIO * unit_size
note_h = unit_size
new_bbox: list[cvt.Rect] = []
if abs(w - note_w) > abs(w - note_w * 2):
# Contains at least two notes, one left and one right.
left_box: cvt.Rect = (bbox[0], bbox[1], cen_x, bbox[3])
right_box: cvt.Rect = (cen_x, bbox[1], bbox[2], bbox[3])
# Upper and lower bounds could have changed
left_box = adjust_bbox(left_box, noteheads)
right_box = adjust_bbox(right_box, noteheads)
# Check recursively
if left_box is not None:
new_bbox.extend(check_bbox_size(left_box, noteheads, unit_size))
if right_box is not None:
new_bbox.extend(check_bbox_size(right_box, noteheads, unit_size))
# Check height
if len(new_bbox) > 0:
tmp_new = []
for box in new_bbox:
tmp_new.extend(check_bbox_size(box, noteheads, unit_size))
new_bbox = tmp_new
else:
num_notes = int(round(h / note_h))
if num_notes > 0:
sub_h = h // num_notes
for i in range(num_notes):
sub_box = (
bbox[0],
round(bbox[1] + i * sub_h),
bbox[2],
round(bbox[1] + (i + 1) * sub_h),
)
new_bbox.append(sub_box)
return new_bbox
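# Illustrative behavior (not part of the original source), assuming a unit
# size of 10px and a notehead width of roughly 1.3 units: a 26x10 box is
# closer to two notehead widths than to one, so it is split at its horizontal
# center and both halves are re-checked recursively, while a 12x21 box is
# split vertically into two stacked notehead boxes instead.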
def split_clumps_of_noteheads(
notehead: NoteheadWithStem, noteheads: NDArray, staff: Staff
) -> list[NoteheadWithStem]:
"""
Note heads might be clumped together by the notehead detection algorithm.
"""
bbox = [
int(notehead.notehead.top_left[0]),
int(notehead.notehead.top_left[1]),
int(notehead.notehead.bottom_right[0]),
int(notehead.notehead.bottom_right[1]),
]
split_boxes = check_bbox_size(bbox, noteheads, staff.average_unit_size)
if len(split_boxes) <= 1:
return [notehead]
result = []
for box in split_boxes:
center = get_center(box)
size = (box[2] - box[0], box[3] - box[1])
notehead = NoteheadWithStem(
BoundingEllipse(
(center, size, 0), notehead.notehead.contours, notehead.notehead.debug_id
),
notehead.stem,
notehead.stem_direction,
)
result.append(notehead)
return result
def combine_noteheads_with_stems(
noteheads: list[BoundingEllipse], stems: list[RotatedBoundingBox]
) -> tuple[list[NoteheadWithStem], list[RotatedBoundingBox]]:
"""
    Combines noteheads with their stems, as this tells us
    which vertical lines are stems and which are bar lines.
"""
result = []
noteheads = sorted(noteheads, key=lambda notehead: notehead.box[0][1])
used_stems = set()
for notehead in noteheads:
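        # Thickening the head's box makes the overlap test below tolerant to
        # the small gap segmentation can leave between a head and its stem;
        # the 15px margin is presumably an empirical choice.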
thickened_notehead = notehead.make_box_thicker(15)
found_stem = False
for stem in stems:
if stem.is_overlapping(thickened_notehead):
is_stem_above = stem.center[1] < notehead.center[1]
if is_stem_above:
direction = StemDirection.UP
else:
direction = StemDirection.DOWN
result.append(NoteheadWithStem(notehead, stem, direction))
used_stems.add(stem)
found_stem = True
break
if not found_stem:
result.append(NoteheadWithStem(notehead, None, None))
unaccounted_stems_or_bars = [stem for stem in stems if stem not in used_stems]
return result, unaccounted_stems_or_bars
def _are_notes_likely_a_chord(note1: Note, note2: Note, tolerance: float) -> bool:
if note1.stem is None or note2.stem is None:
return abs(note1.center[0] - note2.center[0]) < tolerance
return abs(note1.stem.center[0] - note2.stem.center[0]) < tolerance
def _create_note_group(notes: list[Note]) -> Note | NoteGroup:
if len(notes) == 1:
return notes[0]
result = NoteGroup(notes)
return result
def _group_notes_on_staff(staff: Staff) -> None:
notes = staff.get_notes()
groups: list[list[Note]] = []
for note in notes:
group_found = False
for group in groups:
for grouped_note in group:
if _are_notes_likely_a_chord(
note, grouped_note, constants.tolerance_note_grouping(staff.average_unit_size)
):
group_found = True
group.append(note)
break
if group_found:
break
if not group_found:
groups.append([note])
note_groups: list[SymbolOnStaff] = [_create_note_group(group) for group in groups]
note_groups.extend(staff.get_all_except_notes())
sorted_by_x = sorted(note_groups, key=lambda group: group.center[0])
staff.symbols = sorted_by_x
def add_notes_to_staffs(
staffs: list[Staff], noteheads: list[NoteheadWithStem], symbols: NDArray, notehead_pred: NDArray
) -> list[Note]:
result = []
for staff in staffs:
for notehead_chunk in noteheads:
if not staff.is_on_staff_zone(notehead_chunk.notehead):
continue
center = notehead_chunk.notehead.center
point = staff.get_at(center[0])
if point is None:
continue
if (
notehead_chunk.notehead.size[0] < 0.5 * point.average_unit_size
or notehead_chunk.notehead.size[1] < 0.5 * point.average_unit_size
):
continue
for notehead in split_clumps_of_noteheads(notehead_chunk, notehead_pred, staff):
point = staff.get_at(center[0])
if point is None:
continue
if (
notehead.notehead.size[0] < 0.5 * point.average_unit_size
or notehead.notehead.size[0] > 3 * point.average_unit_size
or notehead.notehead.size[1] < 0.5 * point.average_unit_size
or notehead.notehead.size[1] > 2 * point.average_unit_size
):
continue
position = point.find_position_in_unit_sizes(notehead.notehead)
note = Note(notehead.notehead, position, notehead.stem, notehead.stem_direction)
result.append(note)
staff.add_symbol(note)
number_of_notes = 0
number_of_note_groups = 0
for staff in staffs:
_group_notes_on_staff(staff)
number_of_notes += len(staff.get_notes())
number_of_note_groups += len(staff.get_note_groups())
eprint(
"After grouping there are",
number_of_notes,
"notes and",
number_of_note_groups,
"note groups",
)
return result
| 8,635 | Python | .py | 204 | 31.70098 | 101 | 0.583642 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,867 | staff_detection.py | liebharc_homr/homr/staff_detection.py | from collections.abc import Generator, Iterable
import cv2
import cv2.typing as cvt
import numpy as np
from scipy import signal # type: ignore
from homr import constants
from homr.bounding_boxes import (
DebugDrawable,
RotatedBoundingBox,
create_rotated_bounding_box,
)
from homr.debug import Debug
from homr.model import Staff, StaffPoint
from homr.simple_logging import eprint
from homr.type_definitions import NDArray
def prepare_staff_image(img: NDArray) -> NDArray:
"""
Remove small details.
"""
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 3))
out = cv2.erode(img.astype(np.uint8), kernel)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9, 3))
return cv2.dilate(out, kernel)
def make_lines_stronger(img: NDArray, kernel_size: tuple[int, int]) -> NDArray:
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, kernel_size)
img = cv2.dilate(img.astype(np.uint8), kernel)
img = cv2.threshold(img, 0.1, 1, cv2.THRESH_BINARY)[1].astype(np.uint8)
return img
class StaffLineSegment(DebugDrawable):
def __init__(self, debug_id: int, staff_fragments: list[RotatedBoundingBox]):
self.debug_id = debug_id
self.staff_fragments = sorted(staff_fragments, key=lambda box: box.box[0][0])
self.min_x = min([line.center[0] - line.size[0] / 2 for line in staff_fragments])
self.max_x = max([line.center[0] + line.size[0] / 2 for line in staff_fragments])
self.min_y = min([line.center[1] - line.size[1] / 2 for line in staff_fragments])
self.max_y = max([line.center[1] + line.size[1] / 2 for line in staff_fragments])
def merge(self, other: "StaffLineSegment") -> "StaffLineSegment":
staff_lines = self.staff_fragments.copy()
for fragment in other.staff_fragments:
if fragment not in staff_lines:
staff_lines.append(fragment)
return StaffLineSegment(self.debug_id, staff_lines)
def get_at(self, x: float) -> RotatedBoundingBox | None:
tolerance = constants.staff_line_segment_x_tolerance
for fragment in self.staff_fragments:
if (
x >= fragment.center[0] - fragment.size[0] / 2 - tolerance
and x <= fragment.center[0] + fragment.size[0] / 2 + tolerance
):
return fragment
return None
def is_overlapping(self, other: "StaffLineSegment") -> bool:
for line in self.staff_fragments:
for other_line in other.staff_fragments:
if line.is_overlapping(other_line):
return True
return False
def draw_onto_image(self, img: NDArray, color: tuple[int, int, int] = (255, 0, 0)) -> None:
for line in self.staff_fragments:
line.draw_onto_image(img, color)
cv2.putText(
img,
str(self.debug_id),
(int(self.staff_fragments[0].box[0][0]), int(self.staff_fragments[0].box[0][1])),
cv2.FONT_HERSHEY_SIMPLEX,
1,
color,
2,
cv2.LINE_AA,
)
class StaffAnchor(DebugDrawable):
"""
    An anchor is what we call a reliable set of staff lines: five parallel lines
    which, by their relation to other symbols, are likely to belong to a staff.
This is a crucial step as it allows us to then build the complete staff.
"""
def __init__(self, staff_lines: list[StaffLineSegment], symbol: RotatedBoundingBox):
self.staff_lines = staff_lines
y_positions = sorted(
[
line.staff_fragments[0].get_center_extrapolated(symbol.center[0])
for line in staff_lines
]
)
y_deltas = [abs(y_positions[i] - y_positions[i - 1]) for i in range(1, len(y_positions))]
self.unit_sizes = y_deltas
if len(y_deltas) == 0:
self.average_unit_size = 0.0
else:
self.average_unit_size = float(np.mean(y_deltas))
self.symbol = symbol
self.max_y = max([line.max_y for line in staff_lines])
self.min_y = min([line.min_y for line in staff_lines])
max_number_of_ledger_lines = 5
self.y_range = range(int(min(y_positions)), int(max(y_positions)))
self.zone = range(
int(self.min_y - max_number_of_ledger_lines * self.average_unit_size),
int(self.max_y + max_number_of_ledger_lines * self.average_unit_size),
)
def draw_onto_image(self, img: NDArray, color: tuple[int, int, int] = (0, 255, 0)) -> None:
for staff in self.staff_lines:
staff.draw_onto_image(img, color)
self.symbol.draw_onto_image(img, color)
x = int(self.symbol.center[0])
cv2.line(img, [x - 50, self.zone.start], [x + 50, self.zone.start], color, 2)
cv2.line(img, [x - 50, self.zone.stop], [x + 50, self.zone.stop], color, 2)
def _get_all_contours(lines: list[StaffLineSegment]) -> list[cvt.MatLike]:
all_fragments: list[RotatedBoundingBox] = []
for line in lines:
all_fragments.extend(line.staff_fragments)
result: list[cvt.MatLike] = []
for fragment in all_fragments:
result.extend(fragment.contours)
return result
class RawStaff(RotatedBoundingBox):
"""
    A raw staff is made of the parts which we found on the image. It has gaps,
    and its segments start and end differently on every staff line.
"""
def __init__(self, staff_id: int, lines: list[StaffLineSegment], anchors: list[StaffAnchor]):
contours = _get_all_contours(lines)
box = cv2.minAreaRect(np.array(contours))
super().__init__(box, np.concatenate(contours), staff_id)
self.staff_id = staff_id
self.lines = lines
self.anchors = anchors
self.min_x = self.center[0] - self.size[0] / 2
self.max_x = self.center[0] + self.size[0] / 2
self.min_y = self.center[1] - self.size[1] / 2
self.max_y = self.center[1] + self.size[1] / 2
def merge(self, other: "RawStaff") -> "RawStaff":
lines = []
for i, line in enumerate(self.lines):
lines.append(other.lines[i].merge(line))
return RawStaff(self.staff_id, lines, self.anchors + other.anchors)
def draw_onto_image(self, img: NDArray, color: tuple[int, int, int] = (255, 0, 0)) -> None:
for line in self.lines:
line.draw_onto_image(img, color)
def get_staff_for_anchor(anchor: StaffAnchor, staffs: list[RawStaff]) -> RawStaff | None:
for staff in staffs:
for i, anchor_line in enumerate(anchor.staff_lines):
line_requirement = set(anchor_line.staff_fragments)
if line_requirement.issubset(set(staff.lines[i].staff_fragments)):
return staff
return None
def find_raw_staffs_by_connecting_line_fragments(
anchors: list[StaffAnchor], staff_fragments: list[RotatedBoundingBox]
) -> list[RawStaff]:
"""
First we build a list of all lines by combining fragments. Then we identify the lines
which go through the anchors and build a staff from them.
"""
staffs: list[RawStaff] = []
staff_id = 0
for anchor in anchors:
existing_staff = get_staff_for_anchor(anchor, staffs)
fragments = [
fragment
for fragment in staff_fragments
if fragment.center[1] >= anchor.zone.start and fragment.center[1] <= anchor.zone.stop
]
connected = connect_staff_lines(fragments, anchor.average_unit_size)
staff_lines: list[StaffLineSegment] = []
for anchor_line in anchor.staff_lines:
line_requirement = set(anchor_line.staff_fragments)
matching_anchor = [
line for line in connected if line_requirement.issubset(set(line.staff_fragments))
]
if len(matching_anchor) == 1:
staff_lines.extend(matching_anchor)
else:
staff_lines.append(anchor_line)
if existing_staff:
staffs.remove(existing_staff)
staffs.append(existing_staff.merge(RawStaff(staff_id, staff_lines, [anchor])))
else:
staffs.append(RawStaff(staff_id, staff_lines, [anchor]))
staff_id += 1
return staffs
def remove_duplicate_staffs(staffs: list[RawStaff]) -> list[RawStaff]:
"""
    Sometimes we detect the same staff twice but fail to connect the two detections.
    This function removes the duplicates.
"""
result: list[RawStaff] = []
for staff in staffs:
overlapping = [other for other in result if staff.is_overlapping(other)]
if len(overlapping) == 0:
result.append(staff)
continue
staff_duplicates = 2
if len(overlapping) >= staff_duplicates:
# Think this through again, for the moment we just take the existing ones
continue
if len(overlapping[0].anchors) < len(staff.anchors):
# The staff with the most anchors is the most reliable one
result = [s for s in result if s != overlapping[0]]
result.append(staff)
return result
def connect_staff_lines(
staff_lines: list[RotatedBoundingBox], unit_size: float
) -> list[StaffLineSegment]:
"""
    Checks which fragments connect to each other (extrapolation is used to fill gaps)
    and builds a list of StaffLineSegments, where each segment has an increased
    likelihood of belonging to a staff.
"""
# With the pop below we are going through the elements from left to right
sorted_by_right_to_left = sorted(staff_lines, key=lambda box: box.box[0][0], reverse=True)
result: list[list[RotatedBoundingBox]] = []
while len(sorted_by_right_to_left) > 0:
current_staff_line: RotatedBoundingBox = sorted_by_right_to_left.pop()
is_short_line = current_staff_line.box[1][0] < constants.is_short_line(unit_size)
if is_short_line:
continue
connected = False
        for line_group in result:
            if line_group[-1].is_overlapping_extrapolated(current_staff_line, unit_size):
                line_group.append(current_staff_line)
                connected = True
        if not connected:
            result.append([current_staff_line])
    result_top_to_bottom = sorted(result, key=lambda lines: lines[0].box[0][1])
    connected_lines = [
        StaffLineSegment(i, line_group) for i, line_group in enumerate(result_top_to_bottom)
    ]
return connected_lines
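# Illustration with hypothetical numbers: two fragments of the same staff line
# covering x = [0, 100] and x = [110, 200] at a similar angle end up in one
# StaffLineSegment, because is_overlapping_extrapolated bridges the 10px gap,
# while a fragment at a clearly different y position starts a new segment.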
def are_lines_crossing(lines: list[StaffLineSegment]) -> bool:
for i in range(len(lines)):
for j in range(i + 1, len(lines)):
if lines[i].is_overlapping(lines[j]):
return True
return False
def are_lines_parallel(lines: list[StaffLineSegment], unit_size: float) -> bool:
all_angles = []
all_fragments: list[RotatedBoundingBox] = []
for line in lines:
for fragment in line.staff_fragments:
all_angles.append(fragment.angle)
all_fragments.append(fragment)
if len(all_angles) == 0:
return False
average_angle = np.mean(all_angles)
for fragment in all_fragments:
if abs(
fragment.angle - average_angle
) > constants.max_angle_for_lines_to_be_parallel and fragment.size[
0
] > constants.is_short_connected_line(
unit_size
):
return False
return True
def begins_or_ends_on_one_staff_line(
line: RotatedBoundingBox, staff_lines: list[StaffLineSegment], unit_size: float
) -> bool:
for staff_line in staff_lines:
fragment = staff_line.get_at(line.center[0])
if fragment is None:
continue
staff_y = fragment.get_center_extrapolated(line.center[0])
if abs(staff_y - line.center[1]) < unit_size:
return True
return False
def find_staff_anchors(
staff_lines: list[RotatedBoundingBox],
anchor_symbols: list[RotatedBoundingBox],
are_clefs: bool = False,
) -> list[StaffAnchor]:
"""
    Finds staff anchors by looking for five parallel staff lines which run
    through or are interrupted by symbols that always sit on a staff
    (and never above or below it, as notes can).
"""
result: list[StaffAnchor] = []
for center_symbol in anchor_symbols:
        # As the symbol interrupts the staff lines, they are hardest to detect at its center.
        # Therefore we try to detect them at the left and right side of the symbol as well.
if are_clefs:
adjacent = [
center_symbol,
center_symbol.move_to_x_horizontal_by(50),
center_symbol,
center_symbol.move_to_x_horizontal_by(100),
center_symbol,
center_symbol.move_to_x_horizontal_by(150),
]
else:
adjacent = [
center_symbol.move_to_x_horizontal_by(-10),
center_symbol.move_to_x_horizontal_by(-5),
center_symbol,
center_symbol.move_to_x_horizontal_by(5),
center_symbol.move_to_x_horizontal_by(10),
]
for symbol in adjacent:
estimated_unit_size = round(symbol.size[1] / (constants.number_of_lines_on_a_staff - 1))
thickened_bar_line = symbol.make_box_taller(estimated_unit_size)
overlapping_staff_lines = [
line for line in staff_lines if line.is_intersecting(thickened_bar_line)
]
connected_lines = connect_staff_lines(overlapping_staff_lines, estimated_unit_size)
if len(connected_lines) > constants.number_of_lines_on_a_staff:
connected_lines = [
line
for line in connected_lines
if (line.max_x - line.min_x)
> constants.is_short_connected_line(estimated_unit_size)
]
if are_lines_crossing(connected_lines) or not are_lines_parallel(
connected_lines, estimated_unit_size
):
continue
if not are_clefs and not begins_or_ends_on_one_staff_line(
symbol, connected_lines, estimated_unit_size
):
continue
if not len(connected_lines) == constants.number_of_lines_on_a_staff:
continue
result.append(StaffAnchor(connected_lines, symbol))
return result
def resample_staff_segment( # noqa: C901
anchor: StaffAnchor, staff: RawStaff, axis_range: Iterable[int]
) -> Generator[StaffPoint, None, None]:
x = anchor.symbol.center[0]
line_fragments = [line.staff_fragments[0] for line in anchor.staff_lines]
centers: list[float] = [line.get_center_extrapolated(x) for line in line_fragments]
previous_point = StaffPoint(
x, centers, float(np.mean([line.angle for line in line_fragments]))
) # Dummy point at the anchor points
for x in axis_range:
lines = [line.get_at(x) for line in staff.lines]
axis_center = [
line.get_center_extrapolated(x) if line is not None else None for line in lines
]
center_values = [center for center in axis_center if center is not None]
incomplete = all(center is None for center in axis_center)
if incomplete:
continue
deltas = np.diff(center_values)
non_parallel = [delta < 0.5 * anchor.average_unit_size for delta in deltas]
for i, invalid in enumerate(non_parallel):
if invalid:
axis_center[i] = None
axis_center[i + 1] = None
for i, previous_y in enumerate(previous_point.y):
center_value = axis_center[i]
if (
center_value is not None
and abs(center_value - previous_y) > 0.5 * anchor.average_unit_size
):
axis_center[i] = None
prev_center = -1
for i in list(range(len(axis_center))) + list(reversed(list(range(len(axis_center))))):
if axis_center[i] is not None:
prev_center = i
elif prev_center >= 0:
center_value = axis_center[prev_center]
if center_value is not None:
axis_center[i] = center_value + anchor.average_unit_size * (i - prev_center)
incomplete = any(center is None for center in axis_center)
if incomplete:
continue
angle = float(np.mean([line.angle for line in lines if line is not None]))
previous_point = StaffPoint(x, [c for c in axis_center if c is not None], angle)
yield previous_point
def resample_staff(staff: RawStaff) -> Staff:
anchors_left_to_right = sorted(staff.anchors, key=lambda a: a.symbol.center[0])
staff_density = 10
start = (staff.min_x // staff_density) * staff_density
stop = (staff.max_x // staff_density + 1) * staff_density
current_anchor = 0
anchor = anchors_left_to_right[current_anchor]
grid: list[StaffPoint] = []
x = start
for i, anchor in enumerate(anchors_left_to_right):
to_left = range(int(x), int(anchor.symbol.center[0]), staff_density)
if i < len(anchors_left_to_right) - 1:
to_right = range(
int(anchor.symbol.center[0]),
int((anchor.symbol.center[0] + anchors_left_to_right[i + 1].symbol.center[0]) / 2),
staff_density,
)
else:
to_right = range(int(anchor.symbol.center[0]), int(stop), staff_density)
x = to_right.stop
grid.extend(reversed(list(resample_staff_segment(anchor, staff, reversed(to_left)))))
grid.extend(resample_staff_segment(anchor, staff, to_right))
return Staff(grid)
def resample_staffs(staffs: list[RawStaff]) -> list[Staff]:
"""
The RawStaffs might have gaps and segments start and end differently on every staff line.
This function resamples the staffs so for every point of the staff we know the y positions
of all staff lines. In the end this makes the staffs easier to use in the rest of
the analysis.
"""
result = []
for staff in staffs:
result.append(resample_staff(staff))
return result
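# Illustration with hypothetical numbers: a RawStaff covering x = [100, 900] is
# resampled with the staff_density of 10px used above, yielding roughly 80
# StaffPoints which each carry the y positions of all five staff lines at that x.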
def range_intersect(r1: range, r2: range) -> range | None:
    # An empty range is falsy, so "or None" yields None when there is no overlap
    return range(max(r1.start, r2.start), min(r1.stop, r2.stop)) or None
def filter_edge_of_vision(staffs: list[Staff], image_shape: tuple[int, ...]) -> list[Staff]:
"""
    Removes staffs which begin at the right edge or at the bottom edge of the image,
    as these are very likely incomplete staffs.
"""
result = []
for staff in staffs:
starts_at_right_edge = staff.min_x > 0.90 * image_shape[1]
starts_at_bottom_edge = staff.min_y > 0.95 * image_shape[0]
ends_at_left_edge = staff.max_x < 0.20 * image_shape[1]
if any([starts_at_right_edge, starts_at_bottom_edge, ends_at_left_edge]):
continue
result.append(staff)
return result
def sort_staffs_top_to_bottom(staffs: list[Staff]) -> list[Staff]:
return sorted(staffs, key=lambda staff: staff.min_y)
def filter_unusual_anchors(anchors: list[StaffAnchor]) -> list[StaffAnchor]:
unit_sizes = [anchor.average_unit_size for anchor in anchors]
average_unit_size = np.mean(unit_sizes)
unit_size_deviation = np.std(unit_sizes)
result = []
for anchor in anchors:
if abs(anchor.average_unit_size - average_unit_size) > 2 * unit_size_deviation:
continue
result.append(anchor)
return result
def init_zone(clef_anchors: list[StaffAnchor], image_shape: tuple[int, ...]) -> list[range]:
def make_range(start: float, stop: float) -> range:
return range(max(int(start), 0), min(int(stop), image_shape[1]))
# We increase the range only right of the clef as it's the only place
# where we expect to find staff lines
margin_right = 10
ranges = [
make_range(c.symbol.bottom_left[0], c.symbol.top_right[0] + margin_right)
for c in clef_anchors
]
ranges = sorted(ranges, key=lambda r: r.start)
result = []
for i, r in enumerate(ranges):
if i == 0:
result.append(r)
else:
overlaps_with_the_last = r.start < result[-1].stop
if overlaps_with_the_last:
result[-1] = range(result[-1].start, r.stop)
else:
result.append(r)
return result
def filter_line_peaks(
peaks: NDArray, norm: NDArray, max_gap_ratio: float = 1.5
) -> tuple[NDArray, list[int]]:
valid_peaks = np.array([True for _ in range(len(peaks))])
# Filter by height
for idx, p in enumerate(peaks):
max_peak_height = 15
if norm[p] > max_peak_height:
valid_peaks[idx] = False
# Filter by x-axis
gaps = peaks[1:] - peaks[:-1]
count = max(5, round(len(peaks) * 0.2))
approx_unit = np.mean(np.sort(gaps)[:count])
max_gap = approx_unit * max_gap_ratio
    # Prepend an invalid peak to better handle the edge case at the start
    ext_peaks = [peaks[0] - max_gap - 1] + list(peaks)
groups = []
group = -1
for i in range(1, len(ext_peaks)):
if ext_peaks[i] - ext_peaks[i - 1] > max_gap:
group += 1
groups.append(group)
    groups.append(groups[-1] + 1)  # Append an invalid group to better handle the edge case at the end
cur_g = groups[0]
count = 1
for idx in range(1, len(groups)):
group = groups[idx]
if group == cur_g:
count += 1
continue
if count < constants.number_of_lines_on_a_staff:
# Incomplete peaks. Also eliminates the top and bottom incomplete staff lines.
valid_peaks[idx - count : idx] = False
elif count > constants.number_of_lines_on_a_staff:
cand_peaks = peaks[idx - count : idx]
head_part = cand_peaks[: constants.number_of_lines_on_a_staff]
tail_part = cand_peaks[-constants.number_of_lines_on_a_staff :]
if sum(norm[head_part]) > sum(norm[tail_part]):
valid_peaks[idx - count + constants.number_of_lines_on_a_staff : idx] = False
else:
valid_peaks[idx - count : idx - constants.number_of_lines_on_a_staff] = False
cur_g = group
count = 1
return valid_peaks, groups[:-1]
def find_horizontal_lines(
image: NDArray, unit_size: float, line_threshold: float = 0.0
) -> list[list[int]]:
    # Split into zones horizontally and detect staff lines separately.
count = np.zeros(len(image), dtype=np.uint16)
sub_ys, _sub_xs = np.where(image > 0)
for y in sub_ys:
count[y] += 1
count = np.insert(count, [0, len(count)], [0, 0]) # Prepend / append
norm = (count - np.mean(count)) / np.std(count)
centers, _ = signal.find_peaks(norm, height=line_threshold, distance=unit_size, prominence=1)
centers -= 1
norm = norm[1:-1] # Remove prepend / append
_valid_centers, groups = filter_line_peaks(centers, norm)
grouped_centers: dict[int, list[int]] = {}
for i, center in enumerate(centers):
group_number = groups[i]
if group_number not in grouped_centers:
grouped_centers[group_number] = []
grouped_centers[group_number].append(center)
complete_groups = []
for key in grouped_centers.keys():
if len(grouped_centers[key]) == constants.number_of_lines_on_a_staff:
complete_groups.append(sorted(grouped_centers[key]))
return complete_groups
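# A minimal, self-contained sketch of how find_horizontal_lines can be used; the
# synthetic image and numbers are hypothetical and this helper is not called
# anywhere in the pipeline.
def _find_horizontal_lines_demo() -> list[list[int]]:
    demo = np.zeros((100, 50), dtype=np.uint8)
    for y in range(30, 79, 12):  # five synthetic staff lines at y = 30, 42, ..., 78
        demo[y, :] = 1
    # With a unit size close to the true line spacing of 12px, the five line
    # centers should come back as one complete group: [[30, 42, 54, 66, 78]]
    return find_horizontal_lines(demo, unit_size=10.0)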
def predict_other_anchors_from_clefs(
clef_anchors: list[StaffAnchor], image: NDArray
) -> list[RotatedBoundingBox]:
if len(clef_anchors) == 0:
return []
average_unit_size = float(np.mean([anchor.average_unit_size for anchor in clef_anchors]))
anchor_symbols = [anchor.symbol for anchor in clef_anchors]
clef_zones = init_zone(clef_anchors, image.shape)
result: list[RotatedBoundingBox] = []
for zone in clef_zones:
vertical_slice = image[:, zone]
lines_groups = find_horizontal_lines(vertical_slice, average_unit_size)
for group in lines_groups:
min_y = min(group)
max_y = max(group)
center_y = (min_y + max_y) / 2
center_x = zone.start + (zone.stop - zone.start) / 2
box = ((int(center_x), int(center_y)), (zone.stop - zone.start, int(max_y - min_y)), 0)
result.append(RotatedBoundingBox(box, np.array([]), 0))
return [r for r in result if not r.is_overlapping_with_any(anchor_symbols)]
def break_wide_fragments(
fragments: list[RotatedBoundingBox], limit: int = 100
) -> list[RotatedBoundingBox]:
"""
Wide fragments (large x dimension) which are curved tend to be filtered by later steps.
We instead split them into smaller parts, so that the parts better approximate the different
angles of the curve.
"""
result = []
for fragment in fragments:
remaining_fragment = fragment
while remaining_fragment.size[0] > limit:
min_x = min([c[0][0] for c in remaining_fragment.contours])
contours_left = [c for c in remaining_fragment.contours if c[0][0] < min_x + limit]
contours_right = [c for c in remaining_fragment.contours if c[0][0] >= min_x + limit]
# sort by x
contours_left = sorted(contours_left, key=lambda c: c[0][0])
contours_right = sorted(contours_right, key=lambda c: c[0][0])
if len(contours_left) == 0 or len(contours_right) == 0:
break
# Make sure that the contours remain connected by adding
# the first point of the right side to the left side and vice versa
contours_left.append(contours_right[0])
contours_right.append(contours_left[-1])
result.append(
create_rotated_bounding_box(np.array(contours_left), remaining_fragment.debug_id)
)
remaining_fragment = create_rotated_bounding_box(
np.array(contours_right), remaining_fragment.debug_id
)
result.append(remaining_fragment)
return result
def detect_staff(
debug: Debug,
image: NDArray,
staff_fragments: list[RotatedBoundingBox],
clefs_keys: list[RotatedBoundingBox],
likely_bar_or_rests_lines: list[RotatedBoundingBox],
) -> list[Staff]:
"""
Detect staffs on the image. Staffs can be warped, have gaps and can be interrupted by symbols.
"""
staff_anchors = find_staff_anchors(staff_fragments, clefs_keys, are_clefs=True)
eprint("Found " + str(len(staff_anchors)) + " clefs")
possible_other_clefs = predict_other_anchors_from_clefs(staff_anchors, image)
eprint("Found " + str(len(possible_other_clefs)) + " possible other clefs")
staff_anchors.extend(find_staff_anchors(staff_fragments, possible_other_clefs, are_clefs=True))
staff_anchors.extend(
find_staff_anchors(staff_fragments, likely_bar_or_rests_lines, are_clefs=False)
)
staff_anchors = filter_unusual_anchors(staff_anchors)
eprint("Found " + str(len(staff_anchors)) + " staff anchors")
debug.write_bounding_boxes_alternating_colors("staff_anchors", staff_anchors)
raw_staffs_with_possible_duplicates = find_raw_staffs_by_connecting_line_fragments(
staff_anchors, staff_fragments
)
eprint("Found " + str(len(raw_staffs_with_possible_duplicates)) + " staffs")
raw_staffs = remove_duplicate_staffs(raw_staffs_with_possible_duplicates)
if len(raw_staffs_with_possible_duplicates) != len(raw_staffs):
eprint(
"Removed "
+ str(len(raw_staffs_with_possible_duplicates) - len(raw_staffs))
+ " duplicate staffs"
)
debug.write_bounding_boxes_alternating_colors(
"raw_staffs", raw_staffs + likely_bar_or_rests_lines + clefs_keys
)
staffs = resample_staffs(raw_staffs)
staffs = filter_edge_of_vision(staffs, image.shape)
staffs = sort_staffs_top_to_bottom(staffs)
return staffs
| 28,799 | Python | .py | 611 | 37.360065 | 101 | 0.619333 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,868 | brace_dot_detection.py | liebharc_homr/homr/brace_dot_detection.py | import cv2
import numpy as np
from homr import constants
from homr.bounding_boxes import RotatedBoundingBox
from homr.debug import Debug
from homr.model import MultiStaff, Staff
from homr.type_definitions import NDArray
def prepare_brace_dot_image(
symbols: NDArray, staff: NDArray, all_other: NDArray, unit_size: float
) -> NDArray:
brace_dot = cv2.subtract(symbols, staff)
"""
Remove horizontal lines.
"""
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (1, 5))
out = cv2.erode(brace_dot.astype(np.uint8), kernel)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (1, 5))
return cv2.dilate(out, kernel)
def _filter_for_tall_elements(
brace_dot: list[RotatedBoundingBox], staffs: list[Staff]
) -> list[RotatedBoundingBox]:
"""
We filter elements in two steps:
1. Use a rough unit size estimate to reduce the data size
2. Find the closest staff and take its unit size to take warping into account
"""
rough_unit_size = staffs[0].average_unit_size
symbols_larger_than_rough_estimate = [
symbol
for symbol in brace_dot
if symbol.size[1] > constants.min_height_for_brace_rough(rough_unit_size)
and symbol.size[0] < constants.max_width_for_brace_rough(rough_unit_size)
]
result = []
for symbol in symbols_larger_than_rough_estimate:
closest_staff = min(staffs, key=lambda staff: staff.y_distance_to(symbol.center))
unit_size = closest_staff.average_unit_size
if symbol.size[1] > constants.min_height_for_brace(unit_size):
result.append(symbol)
return result
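# Illustration with hypothetical numbers: with an average unit size of 10px only
# symbols taller than constants.min_height_for_brace(10) survive the filter
# above; the rough pre-filter merely keeps the per-staff check cheap.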
def _get_connections_between_staffs_at_bar_lines(
staff1: Staff, staff2: Staff, brace_dot: list[RotatedBoundingBox]
) -> list[RotatedBoundingBox]:
bar_lines1 = staff1.get_bar_lines()
bar_lines2 = staff2.get_bar_lines()
result: list[RotatedBoundingBox] = []
for symbol in brace_dot:
symbol_thicker = symbol.make_box_thicker(30)
first_overlapping_staff1 = [
line for line in bar_lines1 if symbol_thicker.is_overlapping(line.box)
]
first_overlapping_staff2 = [
line for line in bar_lines2 if symbol_thicker.is_overlapping(line.box)
]
if len(first_overlapping_staff1) >= 1 and len(first_overlapping_staff2) >= 1:
result.append(symbol)
return result
def _get_connections_between_staffs_at_clefs(
staff1: Staff, staff2: Staff, brace_dot: list[RotatedBoundingBox]
) -> list[RotatedBoundingBox]:
clefs1 = staff1.get_clefs()
clefs2 = staff2.get_clefs()
result: list[RotatedBoundingBox] = []
for symbol in brace_dot:
symbol_thicker = symbol.make_box_thicker(
constants.tolerance_for_staff_at_any_point(staff1.average_unit_size)
)
first_overlapping_staff1 = [
clef for clef in clefs1 if symbol_thicker.is_overlapping(clef.box)
]
first_overlapping_staff2 = [
clef for clef in clefs2 if symbol_thicker.is_overlapping(clef.box)
]
if len(first_overlapping_staff1) >= 1 and len(first_overlapping_staff2) >= 1:
result.append(symbol)
return result
def _get_connections_between_staffs_at_lines(
staff1: Staff, staff2: Staff, brace_dot: list[RotatedBoundingBox]
) -> list[RotatedBoundingBox]:
result: list[RotatedBoundingBox] = []
for symbol in brace_dot:
symbol_thicker = symbol.make_box_thicker(
constants.tolerance_for_touching_clefs(staff1.average_unit_size)
)
point1 = staff1.get_at(symbol.center[0])
point2 = staff2.get_at(symbol.center[0])
if point1 is None or point2 is None:
continue
if symbol_thicker.is_overlapping(
point1.to_bounding_box()
) and symbol_thicker.is_overlapping(point2.to_bounding_box()):
result.append(symbol)
return result
def _get_connections_between_staffs(
staff1: Staff, staff2: Staff, brace_dot: list[RotatedBoundingBox]
) -> list[RotatedBoundingBox]:
result = []
result.extend(_get_connections_between_staffs_at_bar_lines(staff1, staff2, brace_dot))
result.extend(_get_connections_between_staffs_at_clefs(staff1, staff2, brace_dot))
result.extend(_get_connections_between_staffs_at_lines(staff1, staff2, brace_dot))
return result
def _merge_multi_staff_if_they_share_a_staff(staffs: list[MultiStaff]) -> list[MultiStaff]:
"""
If two MultiStaff objects share a staff, merge them into one MultiStaff object.
"""
result: list[MultiStaff] = []
for staff in staffs:
any_merged = False
for existing in result:
if len(set(staff.staffs).intersection(set(existing.staffs))) > 0:
result.remove(existing)
result.append(existing.merge(staff))
any_merged = True
break
if not any_merged:
result.append(staff)
return result
def find_braces_brackets_and_grand_staff_lines(
debug: Debug, staffs: list[Staff], brace_dot: list[RotatedBoundingBox]
) -> list[MultiStaff]:
"""
    Connects staffs which belong together (multiple voices or a grand staff)
    by searching for braces, brackets and lines which connect the staffs.
"""
brace_dot = _filter_for_tall_elements(brace_dot, staffs)
result = []
for i, staff in enumerate(staffs):
neighbors: list[Staff] = []
if i > 0:
neighbors.append(staffs[i - 1])
if i < len(staffs) - 1:
neighbors.append(staffs[i + 1])
any_connected_neighbor = False
for neighbor in neighbors:
connections = _get_connections_between_staffs(staff, neighbor, brace_dot)
if len(connections) >= constants.minimum_connections_to_form_combined_staff:
result.append(MultiStaff([staff, neighbor], connections))
any_connected_neighbor = True
if not any_connected_neighbor:
result.append(MultiStaff([staff], []))
return _merge_multi_staff_if_they_share_a_staff(result)
def _is_tiny_square(symbol: RotatedBoundingBox, unit_size: float) -> bool:
return symbol.size[0] < 0.5 * unit_size and symbol.size[1] < 0.5 * unit_size
def find_dots(
staffs: list[Staff], brace_dot: list[RotatedBoundingBox], unit_size: float
) -> list[RotatedBoundingBox]:
brace_dot = [symbol for symbol in brace_dot if _is_tiny_square(symbol, unit_size)]
result = []
for staff in staffs:
for symbol in brace_dot:
if not staff.is_on_staff_zone(symbol):
continue
point = staff.get_at(symbol.center[0])
if point is None:
continue
position = point.find_position_in_unit_sizes(symbol)
is_even_position = position % 2 == 0
# Dots are never on staff lines which would be indicated by an odd position
if not is_even_position:
continue
result.append(symbol)
return result
| 7,175 | Python | .py | 161 | 35.956522 | 100 | 0.652885 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,869 | resize.py | liebharc_homr/homr/resize.py | import numpy as np
from PIL import Image
from homr.simple_logging import eprint
from homr.type_definitions import NDArray
def calc_target_image_size(image: Image.Image) -> tuple[int, int]:
    # Estimate the target size from the number of pixels.
    # The best range is 3M to 4.35M pixels.
w, h = image.size
pixels = w * h
target_size_min = 3.0 * 1000 * 1000
target_size_max = 4.35 * 1000 * 1000
if target_size_min <= pixels <= target_size_max:
return w, h
lb = target_size_min / pixels
ub = target_size_max / pixels
ratio = pow((lb + ub) / 2, 0.5)
tar_w = round(ratio * w)
tar_h = round(ratio * h)
return tar_w, tar_h
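# Worked example with hypothetical input: a 3000x3000 image has 9M pixels, so
# lb = 3.0M / 9M = 0.333 and ub = 4.35M / 9M = 0.483. The scale factor is
# sqrt((0.333 + 0.483) / 2) = 0.639, giving a target of about 1917x1917, i.e.
# roughly 3.7M pixels, which lies inside the desired range.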
def resize_image(image_arr: NDArray) -> NDArray:
image = Image.fromarray(image_arr)
tar_w, tar_h = calc_target_image_size(image)
if tar_w == image_arr.shape[1] and tar_h == image_arr.shape[0]:
eprint("Keeping original size of", tar_w, "x", tar_h)
return image_arr
eprint(
"Resizing input from", image_arr.shape[1], "x", image_arr.shape[0], "to", tar_w, "x", tar_h
)
return np.array(image.resize((tar_w, tar_h)))
| 1,130 | Python | .py | 29 | 34.034483 | 99 | 0.643836 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,870 | decoder.py | liebharc_homr/homr/transformer/decoder.py | from math import ceil
from typing import Any
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from transformers import PreTrainedTokenizerFast # type: ignore
from x_transformers.x_transformers import ( # type: ignore
AbsolutePositionalEmbedding,
AttentionLayers,
Decoder,
TokenEmbedding,
)
from homr.debug import AttentionDebug
from homr.simple_logging import eprint
from homr.transformer.configs import Config
from homr.transformer.split_merge_symbols import SymbolMerger
class ScoreTransformerWrapper(nn.Module):
def __init__(
self,
config: Config,
attn_layers: Any,
l2norm_embed: bool = False,
) -> None:
super().__init__()
if not isinstance(attn_layers, AttentionLayers):
raise ValueError("attention layers must be an instance of AttentionLayers")
dim = attn_layers.dim
self.max_seq_len = config.max_seq_len
self.l2norm_embed = l2norm_embed
self.lift_emb = TokenEmbedding(
config.decoder_dim, config.num_lift_tokens, l2norm_embed=l2norm_embed
)
self.pitch_emb = TokenEmbedding(
config.decoder_dim, config.num_pitch_tokens, l2norm_embed=l2norm_embed
)
self.rhythm_emb = TokenEmbedding(
config.decoder_dim, config.num_rhythm_tokens, l2norm_embed=l2norm_embed
)
self.pos_emb = AbsolutePositionalEmbedding(
config.decoder_dim, config.max_seq_len, l2norm_embed=l2norm_embed
)
self.attention_dim = config.max_width * config.max_height // config.patch_size**2 + 1
self.attention_width = config.max_width // config.patch_size
self.attention_height = config.max_height // config.patch_size
self.patch_size = config.patch_size
self.project_emb = (
nn.Linear(config.decoder_dim, dim) if config.decoder_dim != dim else nn.Identity()
)
self.attn_layers = attn_layers
self.norm = nn.LayerNorm(dim)
self.init_()
self.to_logits_lift = nn.Linear(dim, config.num_lift_tokens)
self.to_logits_pitch = nn.Linear(dim, config.num_pitch_tokens)
self.to_logits_rhythm = nn.Linear(dim, config.num_rhythm_tokens)
self.to_logits_note = nn.Linear(dim, config.num_note_tokens)
def init_(self) -> None:
if self.l2norm_embed:
nn.init.normal_(self.lift_emb.emb.weight, std=1e-5)
nn.init.normal_(self.pitch_emb.emb.weight, std=1e-5)
nn.init.normal_(self.rhythm_emb.emb.weight, std=1e-5)
nn.init.normal_(self.pos_emb.emb.weight, std=1e-5)
return
nn.init.kaiming_normal_(self.lift_emb.emb.weight)
nn.init.kaiming_normal_(self.pitch_emb.emb.weight)
nn.init.kaiming_normal_(self.rhythm_emb.emb.weight)
def forward(
self,
rhythms: torch.Tensor,
pitchs: torch.Tensor,
lifts: torch.Tensor,
mask: torch.Tensor | None = None,
return_hiddens: bool = True,
return_center_of_attention: bool = False,
**kwargs: Any,
) -> Any:
x = (
self.rhythm_emb(rhythms)
+ self.pitch_emb(pitchs)
+ self.lift_emb(lifts)
+ self.pos_emb(rhythms)
)
x = self.project_emb(x)
debug = kwargs.pop("debug", None)
x, hiddens = self.attn_layers(x, mask=mask, return_hiddens=return_hiddens, **kwargs)
if return_center_of_attention:
center_of_attention = self.calculate_center_of_attention(
debug, hiddens.attn_intermediates
)
else:
center_of_attention = None
x = self.norm(x)
out_lifts = self.to_logits_lift(x)
out_pitchs = self.to_logits_pitch(x)
out_rhythms = self.to_logits_rhythm(x)
out_notes = self.to_logits_note(x)
return out_rhythms, out_pitchs, out_lifts, out_notes, x, center_of_attention
def calculate_center_of_attention(
self, debug: AttentionDebug | None, intermediates: Any
) -> tuple[float, float]:
filtered_intermediate = [
tensor.post_softmax_attn[:, :, -1, :]
for tensor in intermediates
if tensor.post_softmax_attn.shape[-1] == self.attention_dim
]
attention_all_layers = torch.mean(torch.stack(filtered_intermediate), dim=0)
attention_all_layers = attention_all_layers.squeeze(0).squeeze(1)
attention_all_layers = attention_all_layers.mean(dim=0)
image_attention = attention_all_layers[1:]
image_attention_2d = (
image_attention.reshape(self.attention_height, self.attention_width).cpu().numpy()
)
center_of_attention = np.unravel_index(
image_attention_2d.argmax(), image_attention_2d.shape
)
center_of_attention = (
center_of_attention[0] * self.patch_size,
center_of_attention[1] * self.patch_size,
)
if debug is not None:
debug.add_attention(image_attention_2d, center_of_attention)
return center_of_attention
def top_k(logits: torch.Tensor, thres: float = 0.9) -> torch.Tensor:
k = ceil((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float("-inf"))
probs.scatter_(1, ind, val)
return probs
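# Worked example: for logits of length 10 and thres=0.9, k = ceil(0.1 * 10) = 1,
# so only the single largest logit keeps its value and every other entry becomes
# -inf; a softmax over the result then puts all probability mass on that token.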
class ScoreDecoder(nn.Module):
def __init__(
self,
transformer: ScoreTransformerWrapper,
noteindexes: list[int],
config: Config,
ignore_index: int = -100,
):
super().__init__()
self.pad_value = (config.pad_token,)
self.ignore_index = ignore_index
self.config = config
self.lifttokenizer = PreTrainedTokenizerFast(tokenizer_file=config.filepaths.lifttokenizer)
self.pitchtokenizer = PreTrainedTokenizerFast(
tokenizer_file=config.filepaths.pitchtokenizer
)
self.rhythmtokenizer = PreTrainedTokenizerFast(
tokenizer_file=config.filepaths.rhythmtokenizer
)
self.net = transformer
self.max_seq_len = transformer.max_seq_len
note_mask = torch.zeros(config.num_rhythm_tokens)
note_mask[noteindexes] = 1
self.note_mask = nn.Parameter(note_mask)
# Weight the actual lift tokens (so neither nonote nor null) higher
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@torch.no_grad()
def generate( # noqa: PLR0915
self,
start_tokens: torch.Tensor,
nonote_tokens: torch.Tensor,
seq_len: int,
eos_token: int | None = None,
temperature: float = 1.0,
filter_thres: float = 0.7,
keep_all_symbols_in_chord: bool = False,
**kwargs: Any,
) -> list[str]:
was_training = self.net.training
num_dims = len(start_tokens.shape)
if num_dims == 1:
start_tokens = start_tokens[None, :]
b, t = start_tokens.shape
self.net.eval()
out_rhythm = start_tokens
out_pitch = nonote_tokens
out_lift = nonote_tokens
mask = kwargs.pop("mask", None)
merger = SymbolMerger(keep_all_symbols_in_chord=keep_all_symbols_in_chord)
if mask is None:
mask = torch.full_like(out_rhythm, True, dtype=torch.bool, device=out_rhythm.device)
for _position_in_seq in range(seq_len):
mask = mask[:, -self.max_seq_len :]
x_lift = out_lift[:, -self.max_seq_len :]
x_pitch = out_pitch[:, -self.max_seq_len :]
x_rhythm = out_rhythm[:, -self.max_seq_len :]
rhythmsp, pitchsp, liftsp, notesp, _ignored, center_of_attention = self.net(
x_rhythm, x_pitch, x_lift, mask=mask, return_center_of_attention=True, **kwargs
)
filtered_lift_logits = top_k(liftsp[:, -1, :], thres=filter_thres)
filtered_pitch_logits = top_k(pitchsp[:, -1, :], thres=filter_thres)
filtered_rhythm_logits = top_k(rhythmsp[:, -1, :], thres=filter_thres)
current_temperature = temperature
retry = True
attempt = 0
max_attempts = 5
while retry and attempt < max_attempts:
lift_probs = F.softmax(filtered_lift_logits / current_temperature, dim=-1)
pitch_probs = F.softmax(filtered_pitch_logits / current_temperature, dim=-1)
rhythm_probs = F.softmax(filtered_rhythm_logits / current_temperature, dim=-1)
lift_sample = torch.multinomial(lift_probs, 1)
pitch_sample = torch.multinomial(pitch_probs, 1)
rhythm_sample = torch.multinomial(rhythm_probs, 1)
lift_token = detokenize(lift_sample, self.lifttokenizer)
pitch_token = detokenize(pitch_sample, self.pitchtokenizer)
rhythm_token = detokenize(rhythm_sample, self.rhythmtokenizer)
                # detokenize drops the EOS token, so an empty result means EOS was sampled
                if len(rhythm_token[0]) == 0:
                    break
                retry = merger.add_symbol(rhythm_token[0][0], pitch_token[0][0], lift_token[0][0])
                # If the merger rejects the symbol, sample again with a higher temperature
                current_temperature *= 3.5
                attempt += 1
out_lift = torch.cat((out_lift, lift_sample), dim=-1)
out_pitch = torch.cat((out_pitch, pitch_sample), dim=-1)
out_rhythm = torch.cat((out_rhythm, rhythm_sample), dim=-1)
mask = F.pad(mask, (0, 1), value=True)
if (
eos_token is not None
and (torch.cumsum(out_rhythm == eos_token, 1)[:, -1] >= 1).all()
):
break
out_lift = out_lift[:, t:]
out_pitch = out_pitch[:, t:]
out_rhythm = out_rhythm[:, t:]
self.net.train(was_training)
return [merger.complete()]
def forward(
self,
rhythms: torch.Tensor,
pitchs: torch.Tensor,
lifts: torch.Tensor,
notes: torch.Tensor,
**kwargs: Any,
) -> dict[str, torch.Tensor]:
liftsi = lifts[:, :-1]
liftso = lifts[:, 1:]
pitchsi = pitchs[:, :-1]
pitchso = pitchs[:, 1:]
rhythmsi = rhythms[:, :-1]
rhythmso = rhythms[:, 1:]
noteso = notes[:, 1:]
mask = kwargs.get("mask", None)
if mask is not None and mask.shape[1] == rhythms.shape[1]:
mask = mask[:, :-1]
kwargs["mask"] = mask
rhythmsp, pitchsp, liftsp, notesp, x, _attention = self.net(
rhythmsi, pitchsi, liftsi, **kwargs
) # this calls ScoreTransformerWrapper.forward
loss_consist = self.calConsistencyLoss(rhythmsp, pitchsp, liftsp, notesp, mask)
loss_rhythm = self.masked_logits_cross_entropy(rhythmsp, rhythmso, mask)
loss_pitch = self.masked_logits_cross_entropy(pitchsp, pitchso, mask)
loss_lift = self.masked_logits_cross_entropy(liftsp, liftso, mask)
loss_note = self.masked_logits_cross_entropy(notesp, noteso, mask)
        # From equation 2 of the TrOMR paper; however, we use different values for alpha and beta
alpha = 0.1
beta = 1
loss_sum = loss_rhythm + loss_pitch + loss_lift + loss_note
loss = alpha * loss_sum + beta * loss_consist
return {
"loss_rhythm": loss_rhythm,
"loss_pitch": loss_pitch,
"loss_lift": loss_lift,
"loss_consist": loss_consist,
"loss_note": loss_note,
"loss": loss,
}
def calConsistencyLoss(
self,
rhythmsp: torch.Tensor,
pitchsp: torch.Tensor,
liftsp: torch.Tensor,
notesp: torch.Tensor,
mask: torch.Tensor,
gamma: int = 10,
) -> torch.Tensor:
notesp_soft = torch.softmax(notesp, dim=2)
note_flag = notesp_soft[:, :, 1] * mask
rhythmsp_soft = torch.softmax(rhythmsp, dim=2)
rhythmsp_note = torch.sum(rhythmsp_soft * self.note_mask, dim=2) * mask
pitchsp_soft = torch.softmax(pitchsp, dim=2)
pitchsp_note = torch.sum(pitchsp_soft[:, :, 1:], dim=2) * mask
liftsp_soft = torch.softmax(liftsp, dim=2)
liftsp_note = torch.sum(liftsp_soft[:, :, 1:], dim=2) * mask
loss = (
gamma
* (
F.l1_loss(rhythmsp_note, note_flag, reduction="none")
+ F.l1_loss(note_flag, liftsp_note, reduction="none")
+ F.l1_loss(note_flag, pitchsp_note, reduction="none")
)
/ 3.0
)
# Apply the mask to the loss and average over the non-masked elements
loss = (loss * mask).sum() / mask.sum()
return loss
def masked_logits_cross_entropy(
self,
logits: torch.Tensor,
target: torch.Tensor,
mask: torch.Tensor,
weights: torch.Tensor | None = None,
) -> torch.Tensor:
# Calculate the cross-entropy loss
loss = F.cross_entropy(
logits.transpose(1, 2),
target,
reduction="none",
weight=weights,
ignore_index=self.ignore_index,
)
        # As reduction is "none", we can apply the mask to the loss,
        # which ignores the loss contribution of the padded tokens
loss = loss * mask
loss = loss.sum() / mask.sum()
return loss
def get_decoder(config: Config) -> ScoreDecoder:
return ScoreDecoder(
ScoreTransformerWrapper(
config=config,
attn_layers=Decoder(
dim=config.decoder_dim,
depth=config.decoder_depth,
heads=config.decoder_heads,
**config.decoder_args.to_dict(),
),
),
config=config,
noteindexes=config.noteindexes,
)
def detokenize(tokens: torch.Tensor, tokenizer: Any) -> list[list[str]]:
toks = [tokenizer.convert_ids_to_tokens(tok) for tok in tokens]
for b in range(len(toks)):
for i in reversed(range(len(toks[b]))):
if toks[b][i] is None:
toks[b][i] = ""
            toks[b][i] = toks[b][i].replace("Ġ", " ").strip()  # "Ġ" is the BPE marker for a leading space
if toks[b][i] in (["[BOS]", "[EOS]", "[PAD]"]):
del toks[b][i]
return toks
def tokenize(
symbols: list[str], vocab: Any, default_token: int, vocab_name: str, file_name: str
) -> list[int]:
result = []
for symbol in symbols:
if symbol in vocab:
result.append(vocab[symbol])
else:
eprint("Warning " + file_name + ": " + symbol + " not in " + vocab_name + " vocabulary")
result.append(default_token)
return result
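# Hypothetical usage sketch: with a rhythm vocabulary such as
# {"[PAD]": 0, "barline": 3, "note-quarter": 42}, calling
# tokenize(["note-quarter", "barline", "unknown"], vocab, 0, "rhythm", "a.semantic")
# returns [42, 3, 0] and prints a warning for the unknown symbol; detokenize is
# the inverse direction and additionally drops [BOS]/[EOS]/[PAD] tokens.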
| 14,741 | Python | .py | 348 | 32.488506 | 100 | 0.595534 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,871 | configs.py | liebharc_homr/homr/transformer/configs.py | import json
import os
from typing import Any
workspace = os.path.join(os.path.dirname(__file__))
class FilePaths:
def __init__(self) -> None:
self.checkpoint = os.path.join(
workspace, "pytorch_model_101-ba12ebef4606948816a06f4a011248d07a6f06da.pth"
)
        self.rhythmtokenizer = os.path.join(workspace, "tokenizer_rhythm.json")
        self.lifttokenizer = os.path.join(workspace, "tokenizer_lift.json")
        self.pitchtokenizer = os.path.join(workspace, "tokenizer_pitch.json")
        self.notetokenizer = os.path.join(workspace, "tokenizer_note.json")
def to_dict(self) -> dict[str, Any]:
return {
"checkpoint": self.checkpoint,
"rhythmtokenizer": self.rhythmtokenizer,
"lifttokenizer": self.lifttokenizer,
"pitchtokenizer": self.pitchtokenizer,
"notetokenizer": self.notetokenizer,
}
def to_json_string(self) -> str:
return json.dumps(self.to_dict(), indent=2)
class DecoderArgs:
def __init__(self) -> None:
self.attn_on_attn = True
self.cross_attend = True
self.ff_glu = True
self.rel_pos_bias = False
self.use_scalenorm = False
def to_dict(self) -> dict[str, Any]:
return {
"attn_on_attn": self.attn_on_attn,
"cross_attend": self.cross_attend,
"ff_glu": self.ff_glu,
"rel_pos_bias": self.rel_pos_bias,
"use_scalenorm": self.use_scalenorm,
}
def to_json_string(self) -> str:
return json.dumps(self.to_dict(), indent=2)
class Config:
def __init__(self) -> None:
self.filepaths = FilePaths()
self.channels = 1
self.patch_size = 16
self.max_height = 128
self.max_width = 1280
self.max_seq_len = 256
self.pad_token = 0
self.bos_token = 1
self.eos_token = 2
self.nonote_token = 0
self.num_rhythm_tokens = 89
self.num_note_tokens = 2
self.num_pitch_tokens = 71
self.num_lift_tokens = 5
self.lift_null = 0
self.lift_sharp = 2
self.lift_flat = 3
self.encoder_structure = "hybrid"
self.encoder_depth = 8
self.backbone_layers = [2, 3, 7]
self.encoder_dim = 256
self.encoder_heads = 8
self.decoder_dim = 256
self.decoder_depth = 8
self.decoder_heads = 8
self.temperature = 0.01
self.decoder_args = DecoderArgs()
        with open(self.filepaths.lifttokenizer) as f:
            self.lift_vocab = json.load(f)["model"]["vocab"]
        with open(self.filepaths.pitchtokenizer) as f:
            self.pitch_vocab = json.load(f)["model"]["vocab"]
        with open(self.filepaths.notetokenizer) as f:
            self.note_vocab = json.load(f)["model"]["vocab"]
        with open(self.filepaths.rhythmtokenizer) as f:
            self.rhythm_vocab = json.load(f)["model"]["vocab"]
self.noteindexes = self._get_values_of_keys_starting_with("note-")
self.restindexes = self._get_values_of_keys_starting_with(
"rest-"
) + self._get_values_of_keys_starting_with("multirest-")
self.chordindex = self.rhythm_vocab["|"]
self.barlineindex = self.rhythm_vocab["barline"]
def _get_values_of_keys_starting_with(self, prefix: str) -> list[int]:
return [value for key, value in self.rhythm_vocab.items() if key.startswith(prefix)]
def to_dict(self) -> dict[str, Any]:
return {
"filepaths": self.filepaths.to_dict(),
"channels": self.channels,
"patch_size": self.patch_size,
"max_height": self.max_height,
"max_width": self.max_width,
"max_seq_len": self.max_seq_len,
"pad_token": self.pad_token,
"bos_token": self.bos_token,
"eos_token": self.eos_token,
"nonote_token": self.nonote_token,
"noteindexes": self.noteindexes,
"encoder_structure": self.encoder_structure,
"encoder_depth": self.encoder_depth,
"backbone_layers": self.backbone_layers,
"encoder_dim": self.encoder_dim,
"encoder_heads": self.encoder_heads,
"num_rhythm_tokens": self.num_rhythm_tokens,
"decoder_dim": self.decoder_dim,
"decoder_depth": self.decoder_depth,
"decoder_heads": self.decoder_heads,
"temperature": self.temperature,
"decoder_args": self.decoder_args.to_dict(),
}
def to_json_string(self) -> str:
return json.dumps(self.to_dict(), indent=2)
# Create the default config instance
default_config = Config()
| 4,701 | Python | .py | 111 | 32.990991 | 93 | 0.606471 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,872 | split_merge_symbols.py | liebharc_homr/homr/transformer/split_merge_symbols.py | import re
from homr import constants
from homr.circle_of_fifths import (
KeyTransformation,
NoKeyTransformation,
key_signature_to_circle_of_fifth,
)
from homr.simple_logging import eprint
from homr.transformer.configs import default_config
class SymbolMerger:
def __init__(self, keep_all_symbols_in_chord: bool) -> None:
self._keep_all_symbols_in_chord = keep_all_symbols_in_chord
self.merge: list[list[str]] = []
self.next_symbol_is_chord: bool = False
self.last_clef: str = ""
def _append_symbol(self, symbol: str) -> None:
if self.next_symbol_is_chord:
if len(self.merge) == 0:
eprint("Warning: Unexpected chord symbol")
return
self.merge[-1].append(symbol)
self.next_symbol_is_chord = False
else:
self.merge.append([symbol])
def add_symbol(self, predrhythm: str, predpitch: str, predlift: str) -> bool:
"""
Adds a symbol to the merge list. Returns True if the symbol should be retried.
        Once you are done adding symbols, call complete() to get the merged string.
"""
if predrhythm == "|":
self.next_symbol_is_chord = True
if len(self.merge) == 0:
eprint("Warning: Unexpected chord symbol")
return True
return False
elif "note" in predrhythm:
lift = ""
if predlift in (
"lift_##",
"lift_#",
"lift_bb",
"lift_b",
"lift_N",
):
lift = predlift.split("_")[-1]
self._append_symbol(predpitch + lift + "_" + predrhythm.split("note-")[-1])
return False
elif "clef" in predrhythm:
            # Two clefs in the same staff are very unlikely
if self.last_clef and self.last_clef != predrhythm:
eprint("Warning: Two clefs in a staff")
return True
self.last_clef = predrhythm
self._append_symbol(predrhythm)
return False
else:
self._append_symbol(predrhythm)
return False
def _clean_and_sort_chord(self, chord: list[str]) -> list[str]:
if len(chord) == 1:
return chord
if not self._keep_all_symbols_in_chord:
chord = [symbol for symbol in chord if symbol.startswith("note")]
chord = sorted(chord, key=pitch_name_to_sortable)
return chord
def complete(self) -> str:
merged = [str.join("|", self._clean_and_sort_chord(symbols)) for symbols in self.merge]
return str.join("+", merged)
def merge_single_line(
predrhythm: list[str],
predpitch: list[str],
predlift: list[str],
keep_all_symbols_in_chord: bool,
) -> str:
merger = SymbolMerger(keep_all_symbols_in_chord=keep_all_symbols_in_chord)
for j in range(len(predrhythm)):
merger.add_symbol(predrhythm[j], predpitch[j], predlift[j])
return merger.complete()
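# Hypothetical usage sketch: the three parallel token streams below describe a
# G clef, a C4/E4 chord (the E4 carries a sharp) and a barline, and merge into
# "clef-G2+note-C4_quarter|note-E4#_quarter+barline" (the order inside the
# chord depends on the pitch vocabulary):
#
#   merge_single_line(
#       predrhythm=["clef-G2", "note-quarter", "|", "note-quarter", "barline"],
#       predpitch=["nonote", "note-C4", "nonote", "note-E4", "nonote"],
#       predlift=["nonote", "lift_null", "nonote", "lift_#", "nonote"],
#       keep_all_symbols_in_chord=False,
#   )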
def merge_symbols(
predrhythms: list[list[str]],
predpitchs: list[list[str]],
predlifts: list[list[str]],
keep_all_symbols_in_chord: bool = False,
) -> list[str]:
merges = []
for i in range(len(predrhythms)):
predrhythm = predrhythms[i]
predlift = predlifts[i]
predpitch = predpitchs[i]
merge = merge_single_line(
predrhythm, predpitch, predlift, keep_all_symbols_in_chord=keep_all_symbols_in_chord
)
merges.append(merge)
return merges
def _get_alter(symbol: str) -> str | None:
if symbol.startswith(("note", "gracenote")):
note = symbol.split("_")[0]
# We have no support for double accidentals right now
alterations = {"##": "#", "#": "#", "bb": "b", "b": "b", "N": "N"}
for alteration, return_value in alterations.items():
if alteration in note:
return return_value
return ""
return None
def _alter_to_lift(symbol: str) -> str:
if symbol == "#":
return "lift_#"
elif symbol == "b":
return "lift_b"
elif symbol == "N":
return "lift_N"
else:
return "lift_null"
def _replace_accidentals(notename: str) -> str:
notename = notename.replace("#", "")
notename = notename.replace("b", "")
notename = notename.replace("N", "")
return notename
def _symbol_to_pitch(symbol: str) -> str:
if symbol.startswith(("note", "gracenote")):
without_duration = symbol.split("_")[0]
notename = without_duration.split("-")[1]
notename = _replace_accidentals(notename)
notename = "note-" + notename
return notename
return "nonote"
def _add_duration_modifier(duration: str) -> str:
# TrOMR only allows one dot
if "." in duration:
return "."
if constants.triplet_symbol in duration:
# Ignore triplets for now
# return constants.triplet_symbol
return ""
return ""
def _translate_duration(duration: str) -> str:
duration = duration.replace("second", "breve")
duration = duration.replace("double", "breve")
duration = duration.replace("quadruple", "breve")
duration = duration.replace("thirty", "thirty_second")
duration = duration.replace("sixty", "sixty_fourth")
duration = duration.replace("hundred", "hundred_twenty_eighth")
# We add duration modifiers later again
duration = duration.replace(".", "")
duration = duration.replace(constants.triplet_symbol, "")
return duration
def _symbol_to_rhythm(symbol: str) -> str:
if symbol.startswith(("note", "gracenote")):
note = "note-" + _translate_duration(symbol.split("_")[1])
return note + _add_duration_modifier(symbol)
symbol = symbol.replace("rest-double_whole", "multirest-2")
symbol = symbol.replace("rest-quadruple_whole", "multirest-2")
symbol = symbol.replace("_fermata", "")
# We add duration modifiers later again
symbol = symbol.replace(".", "")
symbol = symbol.replace(constants.triplet_symbol, "")
multirest_match = re.match(r"(rest-whole|multirest-)(\d+)", symbol)
if multirest_match:
rest_length = int(multirest_match[2])
        # Some multirests don't exist in the rhythm tokenizer;
        # for now it's good enough to just recognize them as any multirest
if rest_length <= 1:
return "rest-whole"
max_supported_multi_rest = 10
if rest_length > max_supported_multi_rest:
return "multirest-" + str(max_supported_multi_rest)
symbol = "multirest-" + str(rest_length)
timesignature_match = re.match(r"timeSignature-(\d+)/(\d+)", symbol)
if timesignature_match:
return "timeSignature-/" + timesignature_match[2]
return symbol + _add_duration_modifier(symbol)
def _symbol_to_note(symbol: str) -> str:
if symbol.startswith(("note", "gracenote")):
return "note"
return "nonote"
def _note_name_and_octave_to_sortable(note_name_with_octave: str) -> int:
if note_name_with_octave not in default_config.pitch_vocab:
eprint(
"Warning: unknown note in _note_name_and_octave_to_sortable: ", note_name_with_octave
)
return 1000
# minus to get the right order
return -int(default_config.pitch_vocab[note_name_with_octave])
def pitch_name_to_sortable(pitch_or_rest_name: str) -> int:
if pitch_or_rest_name.startswith("rest"):
pitch_or_rest_name = pitch_or_rest_name.replace("rest_", "rest-")
if pitch_or_rest_name in default_config.rhythm_vocab:
return 1000 + int(default_config.rhythm_vocab[pitch_or_rest_name])
else:
eprint("Warning: rest not in rhythm_vocab", pitch_or_rest_name)
return 1000
note_name = pitch_or_rest_name.split("_")[0]
note_name = _replace_accidentals(note_name)
return _note_name_and_octave_to_sortable(note_name)
def _sort_by_pitch(
lifts: list[str], pitches: list[str], rhythms: list[str], notes: list[str]
) -> tuple[list[str], list[str], list[str], list[str]]:
lifts = lifts.copy()
pitches = pitches.copy()
rhythms = rhythms.copy()
notes = notes.copy()
def swap(i: int, j: int) -> None:
lifts[i], lifts[j] = lifts[j], lifts[i]
pitches[i], pitches[j] = pitches[j], pitches[i]
rhythms[i], rhythms[j] = rhythms[j], rhythms[i]
notes[i], notes[j] = notes[j], notes[i]
for i in range(len(pitches)):
if not rhythms[i].startswith("note") and not rhythms[i].startswith("rest"):
continue
expect_chord = True
for j in range(i + 1, len(pitches)):
is_chord = rhythms[j] == "|"
if is_chord != expect_chord:
break
if is_chord:
expect_chord = False
continue
if not rhythms[j].startswith("note") and not rhythms[j].startswith("rest"):
break
symbol_at_i = pitches[i] if pitches[i] != "nonote" else rhythms[i]
symbol_at_j = pitches[j] if pitches[j] != "nonote" else rhythms[j]
if pitch_name_to_sortable(symbol_at_i) > pitch_name_to_sortable(symbol_at_j):
swap(i, j)
expect_chord = True
return lifts, pitches, rhythms, notes
def convert_alter_to_accidentals(merged: list[str]) -> list[str]:
"""
    Moves alter information into accidentals, i.e. converts the TrOMR format back
    into the semantic format. For example: in D major the TrOMR sequence
    "Note C, Note Cb, Note C" becomes "Note C#, Note Cb, Note Cb"
    (the inverse of the example given in split_symbols).
"""
all_results = []
for line in range(len(merged)):
key = KeyTransformation(0)
line_result = []
for symbols in re.split("\\s+", merged[line].replace("+", " ")):
symbol_result = []
for symbol in re.split("(\\|)", symbols):
if symbol.startswith("keySignature"):
key = KeyTransformation(key_signature_to_circle_of_fifth(symbol.split("-")[-1]))
symbol_result.append(symbol)
elif symbol == "barline":
key = key.reset_at_end_of_measure()
symbol_result.append(symbol)
elif symbol.startswith(("note", "gracenote")):
pitch = _symbol_to_pitch(symbol)
alter = _get_alter(symbol)
note_name = pitch[5:7]
accidental = key.add_accidental(note_name, alter)
parts = symbol.split("_")
transformed_symbol = (
parts[0].replace("N", "").replace("#", "").replace("b", "")
+ accidental
+ "_"
+ parts[1]
)
symbol_result.append(transformed_symbol)
elif symbol != "|":
symbol_result.append(symbol)
if len(symbol_result) > 0:
line_result.append(str.join("|", symbol_result))
all_results.append(str.join("+", line_result))
return all_results
def split_semantic_file(
file_path: str,
) -> tuple[list[list[str]], list[list[str]], list[list[str]], list[list[str]]]:
is_primus = "Corpus" in file_path
with open(file_path) as f:
return split_symbols(f.readlines(), convert_to_modified_semantic=is_primus)
def split_symbols( # noqa: C901
merged: list[str], convert_to_modified_semantic: bool = True
) -> tuple[list[list[str]], list[list[str]], list[list[str]], list[list[str]]]:
"""
    modified_semantic: the semantic format, but with accidentals as they are printed.
    E.g. if the semantic format is Key D Major, Note C#, Note Cb, Note Cb,
    then the TrOMR format will be: Key D Major, Note C, Note Cb, Note C, because
    the flat is the only visible accidental in the image.
"""
predlifts = []
predpitchs = []
predrhythms = []
prednotes = []
for line in range(len(merged)):
predlift = []
predpitch = []
predrhythm = []
prednote = []
key = KeyTransformation(0) if convert_to_modified_semantic else NoKeyTransformation()
for symbols in re.split("\\s+|\\+", merged[line].strip()):
symbollift = []
symbolpitch = []
symbolrhythm = []
symbolnote = []
for symbol in re.split("(\\|)", symbols):
if symbol.startswith("keySignature"):
if convert_to_modified_semantic:
key = KeyTransformation(
key_signature_to_circle_of_fifth(symbol.split("-")[-1])
)
if symbol == "barline":
key = key.reset_at_end_of_measure()
if symbol == "tie":
continue
elif symbol == "|":
symbolrhythm.append("|")
symbolpitch.append("nonote")
symbollift.append("nonote")
symbolnote.append("nonote")
else:
pitch = _symbol_to_pitch(symbol)
symbolpitch.append(pitch)
symbolrhythm.append(_symbol_to_rhythm(symbol))
symbolnote.append(_symbol_to_note(symbol))
alter = _get_alter(symbol)
if alter is not None:
note_name = pitch[5:7]
alter = key.add_accidental(note_name, alter)
symbollift.append(_alter_to_lift(alter))
else:
symbollift.append("nonote")
if len(symbolpitch) > 0:
symbollift, symbolpitch, symbolrhythm, symbolnote = _sort_by_pitch(
symbollift, symbolpitch, symbolrhythm, symbolnote
)
predpitch += symbolpitch
predrhythm += symbolrhythm
prednote += symbolnote
predlift += symbollift
predlifts.append(predlift)
predpitchs.append(predpitch)
predrhythms.append(predrhythm)
prednotes.append(prednote)
return predlifts, predpitchs, predrhythms, prednotes
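# Roughly, assuming the PrIMuS-style semantic strings used above, a call like
#   split_symbols(["clef-G2 keySignature-DM note-F4#_quarter barline"])
# returns four parallel token lists per line (lift, pitch, rhythm, note) of
# equal length; non-note symbols such as the clef live in the rhythm list with
# "nonote" placeholders elsewhere, and the sharp on F4 becomes a "no lift"
# token because the D major key signature already implies it.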
| 14,180 | Python | .py | 332 | 32.503012 | 100 | 0.579535 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,873 | encoder.py | liebharc_homr/homr/transformer/encoder.py | from typing import Any
from timm.models.layers import StdConv2dSame # type: ignore
from timm.models.resnetv2 import ResNetV2 # type: ignore
from timm.models.vision_transformer import VisionTransformer # type: ignore
from timm.models.vision_transformer_hybrid import HybridEmbed # type: ignore
from homr.transformer.configs import Config
def get_encoder(config: Config) -> Any:
backbone_layers = list(config.backbone_layers)
backbone = ResNetV2(
num_classes=0,
global_pool="",
in_chans=config.channels,
drop_rate=0.1,
drop_path_rate=0.1,
layers=backbone_layers,
preact=True,
stem_type="same",
conv_layer=StdConv2dSame,
)
min_patch_size = 2 ** (len(backbone_layers) + 1)
def embed_layer(**x: Any) -> Any:
ps = x.pop("patch_size", min_patch_size)
if ps % min_patch_size != 0 or ps < min_patch_size:
raise ValueError(
"patch_size needs to be multiple of %i with current backbone configuration"
% min_patch_size
)
return HybridEmbed(**x, patch_size=ps // min_patch_size, backbone=backbone)
encoder = VisionTransformer(
img_size=(config.max_height, config.max_width),
patch_size=config.patch_size,
in_chans=config.channels,
num_classes=0,
embed_dim=config.encoder_dim,
depth=config.encoder_depth,
num_heads=config.encoder_heads,
embed_layer=embed_layer,
global_pool="",
)
return encoder
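# Worked example for the patch-size constraint above: with three backbone
# stages, e.g. backbone_layers = [2, 3, 7], the ResNetV2 stem plus stages
# downsample by 2 ** (3 + 1) = 16, so patch_size must be a positive multiple
# of 16 and HybridEmbed is constructed with patch_size // 16.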
| 1,593 | Python | .py | 40 | 31.125 | 92 | 0.635951 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,874 | tromr_arch.py | liebharc_homr/homr/transformer/tromr_arch.py | from typing import Any
import torch
from torch import nn
from homr.debug import AttentionDebug
from homr.transformer.configs import Config
from homr.transformer.decoder import get_decoder
from homr.transformer.encoder import get_encoder
class TrOMR(nn.Module):
def __init__(self, config: Config):
super().__init__()
self.encoder = get_encoder(config)
self.decoder = get_decoder(config)
self.config = config
def eval_mode(self) -> None:
self.decoder.eval()
self.encoder.eval()
def forward(
self,
inputs: torch.Tensor,
rhythms_seq: torch.Tensor,
pitchs_seq: torch.Tensor,
lifts_seq: torch.Tensor,
note_seq: torch.Tensor,
mask: torch.Tensor,
**kwargs: Any,
) -> Any:
encoded = self.encoder(inputs)
loss = self.decoder(
rhythms_seq, pitchs_seq, lifts_seq, note_seq, context=encoded, mask=mask, **kwargs
)
return loss
@torch.no_grad()
def generate(
self, x: torch.Tensor, keep_all_symbols_in_chord: bool, debug: AttentionDebug | None
) -> list[str]:
start_token = (torch.LongTensor([self.config.bos_token] * len(x))[:, None]).to(x.device)
nonote_token = (torch.LongTensor([self.config.nonote_token] * len(x))[:, None]).to(x.device)
context = self.encoder(x)
return self.decoder.generate(
start_token,
nonote_token,
self.config.max_seq_len,
eos_token=self.config.eos_token,
context=context,
keep_all_symbols_in_chord=keep_all_symbols_in_chord,
debug=debug,
)
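# Sketch of a single inference call (see Staff2Score below for the real
# usage); Config() is assumed to carry valid token ids and file paths:
#
#   model = TrOMR(Config())
#   model.eval_mode()
#   tokens = model.generate(images, keep_all_symbols_in_chord=True, debug=None)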
| 1,677 | Python | .py | 47 | 27.787234 | 100 | 0.623305 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,875 | staff2score.py | liebharc_homr/homr/transformer/staff2score.py | import os
import albumentations as alb # type: ignore
import cv2
import safetensors
import torch
from albumentations.pytorch import ToTensorV2 # type: ignore
from homr.debug import AttentionDebug
from homr.transformer.configs import Config
from homr.transformer.tromr_arch import TrOMR
from homr.type_definitions import NDArray
class Staff2Score:
def __init__(self, config: Config, keep_all_symbols_in_chord: bool = False) -> None:
self.config = config
self.keep_all_symbols_in_chord = keep_all_symbols_in_chord
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.model = TrOMR(config)
self.model.eval_mode()
checkpoint_file_path = config.filepaths.checkpoint
if not os.path.exists(checkpoint_file_path):
raise RuntimeError("Please download the model first to " + checkpoint_file_path)
if ".safetensors" in checkpoint_file_path:
tensors = {}
with safetensors.safe_open(checkpoint_file_path, framework="pt", device=0) as f: # type: ignore
for k in f.keys():
tensors[k] = f.get_tensor(k)
self.model.load_state_dict(tensors, strict=False)
elif torch.cuda.is_available():
self.model.load_state_dict(torch.load(checkpoint_file_path), strict=False)
else:
self.model.load_state_dict(
torch.load(checkpoint_file_path, map_location=torch.device("cpu")), strict=False
)
self.model.to(self.device)
if not os.path.exists(config.filepaths.rhythmtokenizer):
raise RuntimeError("Failed to find tokenizer config" + config.filepaths.rhythmtokenizer)
def predict(self, image: NDArray, debug: AttentionDebug | None = None) -> list[str]:
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
imgs_tensor = self._image_to_tensor(image)
return self._generate(
imgs_tensor,
debug=debug,
)
def _image_to_tensor(self, image: NDArray) -> torch.Tensor:
transformed = _transform(image=image)["image"][:1]
imgs_tensor = transformed.float().unsqueeze(1)
return imgs_tensor.to(self.device) # type: ignore
def _generate(
self,
imgs_tensor: torch.Tensor,
debug: AttentionDebug | None = None,
) -> list[str]:
return self.model.generate(
imgs_tensor,
keep_all_symbols_in_chord=self.keep_all_symbols_in_chord,
debug=debug,
)
_transform = alb.Compose(
[
alb.ToGray(always_apply=True),
alb.Normalize((0.7931, 0.7931, 0.7931), (0.1738, 0.1738, 0.1738)),
ToTensorV2(),
]
)
def readimg(config: Config, path: str) -> NDArray:
img: NDArray = cv2.imread(path, cv2.IMREAD_UNCHANGED)
if img is None:
raise ValueError("Failed to read image from " + path)
    if len(img.shape) == 2:  # noqa: PLR2004
        # Check for grayscale first: on a 2D array, shape[-1] is the width,
        # not the channel count, so the channel checks below would be wrong.
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    elif img.shape[-1] == 4:  # noqa: PLR2004
        img = 255 - img[:, :, 3]
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    elif img.shape[-1] == 3:  # noqa: PLR2004
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    else:
        raise RuntimeError("Unsupported image type!")
h, w, c = img.shape
size_h = config.max_height
new_h = size_h
new_w = int(size_h / h * w)
new_w = new_w // config.patch_size * config.patch_size
img = cv2.resize(img, (new_w, new_h))
img = _transform(image=img)["image"][:1]
return img
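# Worked example for the resize above, assuming max_height = 128 and
# patch_size = 16: a 200 x 1000 input is scaled to height 128 and raw width
# int(128 / 200 * 1000) = 640, already a multiple of 16; a raw width of 650
# would be snapped down to 640 so the encoder sees a whole number of patches.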
| 3,569 | Python | .py | 84 | 34.702381 | 108 | 0.638145 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,876 | inference.py | liebharc_homr/homr/segmentation/inference.py | import json
import os
from typing import Any
import numpy as np
import tensorflow as tf
from PIL import Image
from homr.simple_logging import eprint
from homr.type_definitions import NDArray
class InferenceModel:
def __init__(self, model_path: str) -> None:
model, metadata = _load_model(model_path)
self.model = model
self.input_shape = metadata["input_shape"]
self.output_shape = metadata["output_shape"]
def inference( # noqa: C901, PLR0912
self,
image: NDArray,
step_size: int = 128,
batch_size: int = 16,
manual_th: Any | None = None,
) -> tuple[NDArray, NDArray]:
# Collect data
        # Tricky workaround to avoid a random mystery transpose when loading with 'Image'.
image_rgb = Image.fromarray(image).convert("RGB")
image = np.array(image_rgb)
win_size = self.input_shape[1]
data = []
for y in range(0, image.shape[0], step_size):
if y + win_size > image.shape[0]:
y = image.shape[0] - win_size # noqa: PLW2901
for x in range(0, image.shape[1], step_size):
if x + win_size > image.shape[1]:
x = image.shape[1] - win_size # noqa: PLW2901
hop = image[y : y + win_size, x : x + win_size]
data.append(hop)
# Predict
pred = []
for idx in range(0, len(data), batch_size):
eprint(f"{idx+1}/{len(data)} (step: {batch_size})", end="\r")
batch = np.array(data[idx : idx + batch_size])
out = self.model.serve(batch)
pred.append(out)
eprint(f"{len(data)}/{len(data)} (step: {batch_size})") # Add newline after progress
# Merge prediction patches
output_shape = image.shape[:2] + (self.output_shape[-1],)
out = np.zeros(output_shape, dtype=np.float32)
mask = np.zeros(output_shape, dtype=np.float32)
hop_idx = 0
for y in range(0, image.shape[0], step_size):
if y + win_size > image.shape[0]:
y = image.shape[0] - win_size # noqa: PLW2901
for x in range(0, image.shape[1], step_size):
if x + win_size > image.shape[1]:
x = image.shape[1] - win_size # noqa: PLW2901
batch_idx = hop_idx // batch_size
remainder = hop_idx % batch_size
hop = pred[batch_idx][remainder]
out[y : y + win_size, x : x + win_size] += hop
mask[y : y + win_size, x : x + win_size] += 1
hop_idx += 1
out /= mask
if manual_th is None:
class_map = np.argmax(out, axis=-1)
else:
if len(manual_th) != output_shape[-1] - 1:
raise ValueError(f"{manual_th}, {output_shape[-1]}")
class_map = np.zeros(out.shape[:2] + (len(manual_th),))
for idx, th in enumerate(manual_th):
class_map[..., idx] = np.where(out[..., idx + 1] > th, 1, 0)
return class_map, out
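# Worked example of the sliding window above, assuming win_size = 256 and
# step_size = 128 on an image of height 300: y iterates over 0, 128 and 256,
# and the latter two are clamped to 300 - 256 = 44, so the bottom rows are
# covered by overlapping patches; `mask` counts how often each pixel was
# predicted and `out /= mask` averages those overlaps.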
cached_segmentation: dict[str, Any] = {}
def inference(
model_path: str,
image: NDArray,
step_size: int = 128,
batch_size: int = 16,
manual_th: Any | None = None,
) -> tuple[NDArray, NDArray]:
if model_path not in cached_segmentation:
model = InferenceModel(model_path)
cached_segmentation[model_path] = model
else:
model = cached_segmentation[model_path]
return model.inference(image, step_size, batch_size, manual_th)
def _load_model(model_path: str) -> tuple[Any, dict[str, Any]]:
"""Load model and metadata"""
model = tf.saved_model.load(model_path)
with open(os.path.join(model_path, "meta.json")) as f:
metadata = json.loads(f.read())
return model, metadata
| 3,939 | Python | .py | 90 | 33.166667 | 94 | 0.554391 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,877 | config.py | liebharc_homr/homr/segmentation/config.py | import os
script_location = os.path.dirname(os.path.realpath(__file__))
unet_path = os.path.join(script_location, "unet_91-df68794a7f3420b749780deb1eba938911b3d0d3")
segnet_path = os.path.join(script_location, "segnet_89-f8076e6ee78bf998e291a56647477de80aa19f64")
unet_version = os.path.basename(unet_path).split("_")[1]
segnet_version = os.path.basename(segnet_path).split("_")[1]
segmentation_version = unet_version + "_" + segnet_version
| 444 | Python | .py | 7 | 62 | 97 | 0.778802 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,878 | segmentation.py | liebharc_homr/homr/segmentation/segmentation.py | import hashlib
import lzma
import os
from pathlib import Path
import cv2
import numpy as np
from homr.segmentation import config
from homr.segmentation.inference import inference
from homr.simple_logging import eprint
from homr.type_definitions import NDArray
def generate_pred(image: NDArray) -> tuple[NDArray, NDArray, NDArray, NDArray, NDArray]:
if config.unet_path == config.segnet_path:
raise ValueError("unet_path and segnet_path should be different")
eprint("Extracting staffline and symbols")
staff_symbols_map, _ = inference(
config.unet_path,
image,
)
staff_layer = 1
staff = np.where(staff_symbols_map == staff_layer, 1, 0)
symbol_layer = 2
symbols = np.where(staff_symbols_map == symbol_layer, 1, 0)
eprint("Extracting layers of different symbols")
sep, _ = inference(
config.segnet_path,
image,
manual_th=None,
)
stems_layer = 1
stems_rests = np.where(sep == stems_layer, 1, 0)
notehead_layer = 2
notehead = np.where(sep == notehead_layer, 1, 0)
clefs_keys_layer = 3
clefs_keys = np.where(sep == clefs_keys_layer, 1, 0)
return staff, symbols, stems_rests, notehead, clefs_keys
class ExtractResult:
def __init__(
self,
filename: Path,
original: NDArray,
staff: NDArray,
symbols: NDArray,
stems_rests: NDArray,
notehead: NDArray,
clefs_keys: NDArray,
):
self.filename = filename
self.original = original
self.staff = staff
self.symbols = symbols
self.stems_rests = stems_rests
self.notehead = notehead
self.clefs_keys = clefs_keys
def extract(original_image: NDArray, img_path_str: str, use_cache: bool = False) -> ExtractResult:
img_path = Path(img_path_str)
f_name = os.path.splitext(img_path.name)[0]
npy_path = img_path.parent / f"{f_name}.npy"
loaded_from_cache = False
if npy_path.exists() and use_cache:
eprint("Found a cache")
file_hash = hashlib.sha256(original_image).hexdigest() # type: ignore
with lzma.open(npy_path, "rb") as f:
staff = np.load(f)
notehead = np.load(f)
symbols = np.load(f)
stems_rests = np.load(f)
clefs_keys = np.load(f)
cached_file_hash = f.readline().decode().strip()
model_name = f.readline().decode().strip()
if cached_file_hash == "" or model_name == "":
eprint("Cache is missing meta information, skipping cache")
elif file_hash != cached_file_hash:
eprint("File hash mismatch, skipping cache")
elif model_name != config.segmentation_version:
eprint("Models have been updated, skipping cache")
else:
loaded_from_cache = True
eprint("Loading from cache")
if not loaded_from_cache:
ori_inf_type = os.environ.get("INFERENCE_WITH_TF", None)
os.environ["INFERENCE_WITH_TF"] = "true"
staff, symbols, stems_rests, notehead, clefs_keys = generate_pred(original_image)
if ori_inf_type is not None:
os.environ["INFERENCE_WITH_TF"] = ori_inf_type
else:
del os.environ["INFERENCE_WITH_TF"]
if use_cache:
eprint("Saving cache")
file_hash = hashlib.sha256(original_image).hexdigest() # type: ignore
with lzma.open(npy_path, "wb") as f:
np.save(f, staff)
np.save(f, notehead)
np.save(f, symbols)
np.save(f, stems_rests)
np.save(f, clefs_keys)
f.write((file_hash + "\n").encode())
f.write((config.segmentation_version + "\n").encode())
original_image = cv2.resize(original_image, (staff.shape[1], staff.shape[0]))
return ExtractResult(
img_path, original_image, staff, symbols, stems_rests, notehead, clefs_keys
)
def segmentation(image: NDArray, img_path: str, use_cache: bool = False) -> ExtractResult:
return extract(image, img_path, use_cache=use_cache)
| 4,276 | Python | .py | 103 | 31.941748 | 99 | 0.603619 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,879 | download.py | liebharc_homr/training/download.py | import os
from pathlib import Path
from homr.download_utils import download_file, untar_file, unzip_file
from homr.simple_logging import eprint
script_location = os.path.dirname(os.path.realpath(__file__))
git_root = Path(script_location).parent.absolute()
dataset_root = os.path.join(git_root, "datasets")
def download_cvs_musicma() -> str:
dataset_path = os.path.join(dataset_root, "CvcMuscima-Distortions")
if os.path.exists(dataset_path):
return dataset_path
eprint(
"Downloading Staff Removal set from http://pages.cvc.uab.es/cvcmuscima/index_database.html"
)
download_url = "http://datasets.cvc.uab.es/muscima/CVCMUSCIMA_SR.zip"
download_path = os.path.join(dataset_root, "CVCMUSCIMA_SR.zip")
download_file(download_url, download_path)
eprint("Extracting download")
unzip_file(download_path, script_location)
eprint("Download complete")
return dataset_path
def download_deep_scores() -> str:
dataset_path = os.path.join(dataset_root, "ds2_dense")
if os.path.exists(dataset_path):
return dataset_path
eprint("Downloading deep DeepScoresV2 Dense from https://zenodo.org/records/4012193")
download_url = "https://zenodo.org/records/4012193/files/ds2_dense.tar.gz?download=1"
download_path = os.path.join(dataset_root, "ds2_dense.tar.gz")
download_file(download_url, download_path)
eprint("Extracting download")
untar_file(download_path, script_location)
eprint("Download complete")
return dataset_path
| 1,519 | Python | .py | 33 | 41.575758 | 99 | 0.738514 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,880 | music_xml.py | liebharc_homr/training/music_xml.py | import xml.etree.ElementTree as ET
import musicxml.xmlelement.xmlelement as mxl # type: ignore
from musicxml.parser.parser import _parse_node # type: ignore
from homr import constants
from homr.circle_of_fifths import KeyTransformation, circle_of_fifth_to_key_signature
from homr.simple_logging import eprint
class MusicXmlValidationError(Exception):
pass
class SymbolWithPosition:
def __init__(self, position: int, symbol: str) -> None:
self.position = position
self.symbol = symbol
class SemanticMeasure:
def __init__(self, number_of_clefs: int) -> None:
self.staffs: list[list[SymbolWithPosition]] = [[] for _ in range(number_of_clefs)]
self.current_position = 0
def append_symbol(self, symbol: str) -> None:
if len(self.staffs) == 0:
raise ValueError("Expected to get clefs as first symbol")
if symbol.startswith("note"):
raise ValueError("Call append_note for notes")
else:
for staff in self.staffs:
staff.append(SymbolWithPosition(-1, symbol))
def append_symbol_to_staff(self, staff: int, symbol: str) -> None:
if len(self.staffs) == 0:
raise ValueError("Expected to get clefs as first symbol")
if symbol.startswith("note"):
raise ValueError("Call append_note for notes")
else:
self.staffs[staff].append(SymbolWithPosition(-1, symbol))
def append_position_change(self, duration: int) -> None:
if len(self.staffs) == 0:
raise ValueError("Expected to get clefs as first symbol")
new_position = self.current_position + duration
if new_position < 0:
raise ValueError(
"Backup duration is too long " + str(self.current_position) + " " + str(duration)
)
self.current_position = new_position
def append_rest(self, staff: int, duration: int, symbol: str) -> None:
self.append_note(staff, False, duration, symbol)
def append_note(self, staff: int, is_chord: bool, duration: int, symbol: str) -> None:
if len(self.staffs) == 0:
raise ValueError("Expected to get clefs as first symbol")
if is_chord:
if len(self.staffs[staff]) == 0:
raise ValueError("A chord requires a previous note")
previous_symbol = self.staffs[staff][-1]
self.staffs[staff].append(SymbolWithPosition(previous_symbol.position, symbol))
self.current_position = previous_symbol.position + duration
else:
self.staffs[staff].append(SymbolWithPosition(self.current_position, symbol))
self.current_position += duration
def complete_measure(self) -> list[list[str]]:
result: list[list[str]] = []
for staff in self.staffs:
result_staff: list[str] = []
grouped_symbols: dict[int, list[str]] = {}
for symbol in staff:
if symbol.position < 0:
# Directly append clefs, keys and time signatures
result_staff.append(symbol.symbol)
continue
if symbol.position not in grouped_symbols:
grouped_symbols[symbol.position] = []
grouped_symbols[symbol.position].append(symbol.symbol)
for position in sorted(grouped_symbols.keys()):
result_staff.append(str.join("|", grouped_symbols[position]))
result_staff.append("barline")
result.append(result_staff)
return result
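# A short sketch of how complete_measure groups chords, using made-up
# semantic tokens in the format of this module:
#
#   measure = SemanticMeasure(number_of_clefs=1)
#   measure.append_symbol_to_staff(0, "clef-G2")
#   measure.append_note(0, False, 2, "note-C4_quarter")
#   measure.append_note(0, True, 2, "note-E4_quarter")  # chord with the C4
#   measure.complete_measure()
#   # -> [["clef-G2", "note-C4_quarter|note-E4_quarter", "barline"]]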
class SemanticPart:
def __init__(self) -> None:
self.current_measure: SemanticMeasure | None = None
self.staffs: list[list[str]] = []
def append_clefs(self, clefs: list[str]) -> None:
if self.current_measure is not None:
if len(self.current_measure.staffs) != len(clefs):
raise ValueError("Number of clefs changed")
for staff, clef in enumerate(clefs):
if not any(symbol.symbol == clef for symbol in self.current_measure.staffs[staff]):
raise MusicXmlValidationError("Clef changed")
return
self.staffs = [[] for _ in range(len(clefs))]
measure = SemanticMeasure(len(clefs))
for staff, clef in enumerate(clefs):
measure.append_symbol_to_staff(staff, clef)
self.current_measure = measure
def append_symbol(self, symbol: str) -> None:
if self.current_measure is None:
raise ValueError("Expected to get clefs as first symbol")
self.current_measure.append_symbol(symbol)
def append_rest(self, staff: int, duration: int, symbol: str) -> None:
if self.current_measure is None:
raise ValueError("Expected to get clefs as first symbol")
self.current_measure.append_rest(staff, duration, symbol)
def append_note(self, staff: int, is_chord: bool, duration: int, symbol: str) -> None:
if self.current_measure is None:
raise ValueError("Expected to get clefs as first symbol")
self.current_measure.append_note(staff, is_chord, duration, symbol)
def append_position_change(self, duration: int) -> None:
if self.current_measure is None:
raise ValueError("Expected to get clefs as first symbol")
self.current_measure.append_position_change(duration)
def on_end_of_measure(self) -> None:
if self.current_measure is None:
raise ValueError("Expected to get clefs as first symbol")
if self.current_measure.current_position == 0:
            # Measure was reset to its start, likely to add another voice to it
return
for staff, result in enumerate(self.current_measure.complete_measure()):
self.staffs[staff].extend(result)
self.current_measure = SemanticMeasure(len(self.current_measure.staffs))
def get_staffs(self) -> list[list[str]]:
return self.staffs
def _translate_duration(duration: str) -> str:
definition = {
"breve": "double_whole",
"whole": "whole",
"half": "half",
"quarter": "quarter",
"eighth": "eighth",
"16th": "sixteenth",
"32nd": "thirty_second",
"64th": "sixty_fourth",
}
return definition[duration]
def _get_alter(note: mxl.XMLPitch) -> str: # type: ignore
alter = note.get_children_of_type(mxl.XMLAlter)
if len(alter) == 0:
return ""
alter_value = int(alter[0].value_)
if alter_value == 1:
return "#"
if alter_value == -1:
return "b"
if alter_value == 0:
return "N"
return ""
def _get_alter_from_courtesy(accidental: mxl.XMLAccidental) -> str:  # type: ignore
value = accidental.value_
if value == "sharp":
return "#"
if value == "flat":
return "b"
if value == "natural":
return "N"
return ""
def _count_dots(note: mxl.XMLNote) -> str: # type: ignore
dots = note.get_children_of_type(mxl.XMLDot)
return "." * len(dots)
def _get_triplet_mark(note: mxl.XMLNote) -> str: # type: ignore
time_modification = note.get_children_of_type(mxl.XMLTimeModification)
if len(time_modification) == 0:
return ""
actual_notes = time_modification[0].get_children_of_type(mxl.XMLActualNotes)
if len(actual_notes) == 0:
return ""
normal_notes = time_modification[0].get_children_of_type(mxl.XMLNormalNotes)
if len(normal_notes) == 0:
return ""
is_triplet = (
int(actual_notes[0].value_) == 3 and int(normal_notes[0].value_) == 2 # noqa: PLR2004
)
is_sixtuplet = (
int(actual_notes[0].value_) == 6 and int(normal_notes[0].value_) == 4 # noqa: PLR2004
)
if is_triplet or is_sixtuplet:
return constants.triplet_symbol
return ""
def _process_attributes( # type: ignore
semantic: SemanticPart, attribute: mxl.XMLAttributes, key: KeyTransformation
) -> KeyTransformation:
clefs = attribute.get_children_of_type(mxl.XMLClef)
if len(clefs) > 0:
clefs_semantic = []
for clef in clefs:
sign = clef.get_children_of_type(mxl.XMLSign)[0].value_
line = clef.get_children_of_type(mxl.XMLLine)[0].value_
clefs_semantic.append("clef-" + sign + str(line))
semantic.append_clefs(clefs_semantic)
keys = attribute.get_children_of_type(mxl.XMLKey)
if len(keys) > 0:
fifths = keys[0].get_children_of_type(mxl.XMLFifths)[0].value_
semantic.append_symbol("keySignature-" + circle_of_fifth_to_key_signature(int(fifths)))
key = KeyTransformation(int(fifths))
times = attribute.get_children_of_type(mxl.XMLTime)
if len(times) > 0:
beats = times[0].get_children_of_type(mxl.XMLBeats)[0].value_
beat_type = times[0].get_children_of_type(mxl.XMLBeatType)[0].value_
semantic.append_symbol("timeSignature-" + beats + "/" + beat_type)
return key
def _process_note( # type: ignore
semantic: SemanticPart, note: mxl.XMLNote, key: KeyTransformation
) -> KeyTransformation:
staff = 0
staff_nodes = note.get_children_of_type(mxl.XMLStaff)
if len(staff_nodes) > 0:
staff = int(staff_nodes[0].value_) - 1
is_chord = len(note.get_children_of_type(mxl.XMLChord)) > 0
if len(note.get_children_of_type(mxl.XMLDuration)) == 0:
is_grace_note = len(note.get_children_of_type(mxl.XMLGrace)) > 0
if not is_grace_note:
eprint("Note without duration", note.get_children())
duration = 0
else:
duration = int(note.get_children_of_type(mxl.XMLDuration)[0].value_)
rest = note.get_children_of_type(mxl.XMLRest)
if len(rest) > 0:
dot = _count_dots(note)
if rest[0] and rest[0].attributes.get("measure", None):
semantic.append_rest(staff, duration, "rest-whole" + dot)
else:
duration_type = note.get_children_of_type(mxl.XMLType)[0].value_
semantic.append_rest(
staff, duration, "rest-" + _translate_duration(duration_type) + dot
)
pitch = note.get_children_of_type(mxl.XMLPitch)
if len(pitch) > 0:
alter = _get_alter(pitch[0])
step = pitch[0].get_children_of_type(mxl.XMLStep)[0].value_
octave = pitch[0].get_children_of_type(mxl.XMLOctave)[0].value_
duration_type = note.get_children_of_type(mxl.XMLType)[0].value_
alter = key.add_accidental(
step + str(octave),
alter,
)
        courtesy_accidental = note.get_children_of_type(mxl.XMLAccidental)
        if len(courtesy_accidental) > 0:
            alter = _get_alter_from_courtesy(courtesy_accidental[0])
semantic.append_note(
staff,
is_chord,
duration,
"note-"
+ step
+ str(octave)
+ alter
+ "_"
+ _translate_duration(duration_type)
+ _count_dots(note)
+ _get_triplet_mark(note),
)
return key
def _process_backup(semantic: SemanticPart, backup: mxl.XMLBackup) -> None: # type: ignore
backup_value = int(backup.get_children_of_type(mxl.XMLDuration)[0].value_)
semantic.append_position_change(-backup_value)
def _process_forward(semantic: SemanticPart, backup: mxl.XMLBackup) -> None: # type: ignore
forward_value = int(backup.get_children_of_type(mxl.XMLDuration)[0].value_)
semantic.append_position_change(forward_value)
def _music_part_to_semantic(part: mxl.XMLPart) -> list[list[str]]: # type: ignore
semantic = SemanticPart()
key = KeyTransformation(0)
for measure in part.get_children_of_type(mxl.XMLMeasure):
for child in measure.get_children():
if isinstance(child, mxl.XMLAttributes):
key = _process_attributes(semantic, child, key)
if isinstance(child, mxl.XMLNote):
key = _process_note(semantic, child, key)
if isinstance(child, mxl.XMLBackup):
_process_backup(semantic, child)
if isinstance(child, mxl.XMLForward):
_process_forward(semantic, child)
semantic.on_end_of_measure()
key = key.reset_at_end_of_measure()
return semantic.get_staffs()
def _remove_dynamics_attribute_from_nodes_recursive(node: ET.Element) -> None:
"""
We don't need the dynamics attribute in the XML, but XSD validation
    sometimes fails if it's negative, so we remove it.
"""
if "dynamics" in node.attrib:
del node.attrib["dynamics"]
for child in node:
_remove_dynamics_attribute_from_nodes_recursive(child)
def _music_xml_content_to_semantic(element: ET.Element) -> list[list[str]]:
_remove_dynamics_attribute_from_nodes_recursive(element)
root = _parse_node(element)
result = []
for part in root.get_children_of_type(mxl.XMLPart):
semantic = _music_part_to_semantic(part)
result.extend(semantic)
return result
def music_xml_string_to_semantic(content: str) -> list[list[str]]:
xml = ET.fromstring(content) # noqa: S314
return _music_xml_content_to_semantic(xml)
def music_xml_to_semantic(file_path: str) -> list[list[str]]:
with open(file_path) as file:
xml = ET.parse(file) # noqa: S314
return _music_xml_content_to_semantic(xml.getroot())
def group_in_measures(semantic: list[str]) -> tuple[str, list[list[str]]]:
result: list[list[str]] = []
clef = ""
key = ""
current_measure: list[str] = []
for symbol in semantic:
if symbol == "barline":
current_measure.append(symbol)
result.append(current_measure)
current_measure = []
else:
current_measure.append(symbol)
if symbol.startswith("clef"):
clef = symbol
elif symbol.startswith("keySignature"):
key = symbol
if len(current_measure) > 0:
result.append(current_measure)
prelude = clef + "+" + key + "+"
return prelude, result
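# Illustrative call, assuming the semantic token names used in this module:
#
#   prelude, measures = group_in_measures(
#       ["clef-G2", "keySignature-CM", "note-C4_quarter", "barline", "note-D4_quarter"]
#   )
#   # prelude  == "clef-G2+keySignature-CM+"
#   # measures == [["clef-G2", "keySignature-CM", "note-C4_quarter", "barline"],
#   #              ["note-D4_quarter"]]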
| 14,131 | Python | .py | 312 | 36.592949 | 99 | 0.630585 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,881 | train.py | liebharc_homr/training/train.py | import argparse
import os
import sys
import tensorflow as tf
from homr.simple_logging import eprint
from training import download
from training.run_id import get_run_id
from training.segmentation import train
from training.segmentation.model_utils import save_model
from training.transformer.train import train_transformer
def get_segmentation_model_path(model_name: str) -> str:
model_path = os.path.join(script_location, "..", "homr", "segmentation")
run_id = get_run_id()
return os.path.join(model_path, f"{model_name}_{run_id}")
script_location = os.path.dirname(os.path.realpath(__file__))
parser = argparse.ArgumentParser(description="Train a model")
parser.add_argument("model_name", type=str, help="The name of the model to train")
parser.add_argument(
"--fp32",
action="store_true",
help="Only applicable for the transformer: Trains with fp32 accuracy",
)
args = parser.parse_args()
model_type = args.model_name
if model_type == "segnet":
dataset = download.download_deep_scores()
model = train.train_model(dataset, data_model=model_type, steps=1500, epochs=15)
filename = get_segmentation_model_path(model_type)
meta = {
"input_shape": list(model.input_shape),
"output_shape": list(model.output_shape),
}
save_model(model, meta, filename)
eprint("Model saved as " + filename)
elif model_type == "unet":
dataset = download.download_cvs_musicma()
model = train.train_model(dataset, data_model=model_type, steps=1500, epochs=10)
filename = get_segmentation_model_path(model_type)
meta = {
"input_shape": list(model.input_shape),
"output_shape": list(model.output_shape),
}
save_model(model, meta, filename)
eprint("Model saved as " + filename)
elif model_type in ["unet_from_checkpoint", "segnet_from_checkpoint"]:
model = tf.keras.models.load_model(
"seg_unet.keras", custom_objects={"WarmUpLearningRate": train.WarmUpLearningRate}
)
model_name = model_type.split("_")[0]
filename = get_segmentation_model_path(model_name)
meta = {
"input_shape": list(model.input_shape),
"output_shape": list(model.output_shape),
}
save_model(model, meta, filename)
eprint("Model saved as " + filename)
elif model_type == "transformer":
train_transformer(fp32=args.fp32)
else:
eprint("Unknown model: " + model_type)
sys.exit(1)
| 2,409 | Python | .py | 61 | 35.393443 | 89 | 0.711843 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,882 | musescore_svg.py | liebharc_homr/training/musescore_svg.py | import glob
import math
from xml.dom import minidom
from homr import constants
from homr.simple_logging import eprint
class SvgValidationError(Exception):
pass
class SvgRectangle:
def __init__(self, x: int, y: int, width: int, height: int):
self.x = x
self.y = y
self.width = width
self.height = height
def intersects(self, rect2: "SvgRectangle") -> bool:
# Unpack the rectangles
x1, y1, width1, height1 = [self.x, self.y, self.width, self.height]
x2, y2, width2, height2 = [rect2.x, rect2.y, rect2.width, rect2.height]
if x1 + width1 < x2 or x2 + width2 < x1:
return False
if y1 + height1 < y2 or y2 + height2 < y1:
return False
return True
def merge(self, other: "SvgRectangle") -> "SvgRectangle":
x = min(self.x, other.x)
y = min(self.y, other.y)
width = max(self.x + self.width, other.x + other.width) - x
height = max(self.y + self.height, other.y + other.height) - y
return SvgRectangle(x, y, width, height)
def __str__(self) -> str:
return f"({self.x}, {self.y}, {self.width}, {self.height})"
def __repr__(self) -> str:
return self.__str__()
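# Quick sanity sketch for the geometry above (coordinates are made up):
#
#   a = SvgRectangle(0, 0, 10, 10)
#   b = SvgRectangle(5, 5, 10, 10)
#   a.intersects(b)  # True, the rectangles overlap
#   a.merge(b)       # (0, 0, 15, 15), the bounding box of both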
class SvgStaff(SvgRectangle):
def __init__(self, x: int, y: int, width: int, height: int):
super().__init__(x, y, width, height)
self.bar_line_x_positions = set()
# Add the starting and ending barline
self.bar_line_x_positions.add(self.x)
self.bar_line_x_positions.add(self.x + self.width)
self.min_measure_width = 100
def add_bar_line(self, bar_line: SvgRectangle) -> None:
already_present = any(
abs(bar_line.x - x) < self.min_measure_width for x in self.bar_line_x_positions
)
if not already_present:
self.bar_line_x_positions.add(bar_line.x)
@property
def number_of_measures(self) -> int:
return len(self.bar_line_x_positions) - 1
def __str__(self) -> str:
return f"({self.x}, {self.y}, {self.width}, {self.height}): {self.number_of_measures}"
def __repr__(self) -> str:
return self.__str__()
class SvgMusicFile:
def __init__(self, filename: str, width: float, height: float, staffs: list[SvgStaff]):
self.filename = filename
self.width = width
self.height = height
self.staffs = staffs
def get_position_from_multiple_svg_files(musicxml_file: str) -> list[SvgMusicFile]:
pattern = musicxml_file.replace(".musicxml", "*.svg")
svgs = glob.glob(pattern)
sorted_by_id = sorted(svgs, key=lambda x: int(x.split("-")[-1].split(".")[0]))
result: list[SvgMusicFile] = []
for svg in sorted_by_id:
result.append(get_position_information_from_svg(svg))
return result
def _parse_paths(points: str) -> SvgRectangle:
[start, end] = points.split()
[x1, y1] = start.split(",")
[x2, y2] = end.split(",")
return SvgRectangle(
math.floor(float(x1)),
math.floor(float(y1)),
math.ceil(float(x2) - float(x1)),
math.ceil(float(y2) - float(y1)),
)
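# Example of the polyline parsing above: an SVG attribute
# points="100.5,200.0 300.25,205.0" becomes SvgRectangle(100, 200, 200, 5),
# i.e. the floor of the start point and the ceil of the extent.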
def _combine_staff_lines_and_bar_lines(
staff_lines: list[SvgRectangle], bar_lines: list[SvgRectangle]
) -> list[SvgStaff]:
if len(staff_lines) % constants.number_of_lines_on_a_staff != 0:
eprint("Warning: Staff lines are not a multiple of 5, but is ", len(staff_lines))
return []
groups: list[list[SvgRectangle]] = []
staffs_sorted_by_y = sorted(staff_lines, key=lambda s: s.y)
for i, staff_line in enumerate(staffs_sorted_by_y):
if i % constants.number_of_lines_on_a_staff == 0:
groups.append([])
groups[-1].append(staff_line)
merged_groups: list[SvgRectangle] = []
for group in groups:
merged_group = group[0]
for line in group[1:]:
merged_group = merged_group.merge(line)
merged_groups.append(merged_group)
staffs = [SvgStaff(staff.x, staff.y, staff.width, staff.height) for staff in merged_groups]
for bar_line in bar_lines:
for staff in staffs:
if staff.intersects(bar_line):
staff.add_bar_line(bar_line)
return staffs
def get_position_information_from_svg(svg_file: str) -> SvgMusicFile:
doc = minidom.parse(svg_file) # noqa: S318
svg_element = doc.getElementsByTagName("svg")[0]
width = float(svg_element.getAttribute("width").replace("px", ""))
height = float(svg_element.getAttribute("height").replace("px", ""))
lines = doc.getElementsByTagName("polyline")
staff_lines: list[SvgRectangle] = []
bar_lines: list[SvgRectangle] = []
for line in lines:
class_name = line.getAttribute("class")
if class_name == "StaffLines":
staff_lines.append(_parse_paths(line.getAttribute("points")))
if class_name == "BarLine":
bar_lines.append(_parse_paths(line.getAttribute("points")))
paths = doc.getElementsByTagName("path")
number_of_clefs = 0
for path in paths:
class_name = path.getAttribute("class")
if class_name == "Clef":
number_of_clefs += 1
combined = _combine_staff_lines_and_bar_lines(staff_lines, bar_lines)
if len(combined) != number_of_clefs:
raise SvgValidationError(
f"Number of clefs {number_of_clefs} does not match the number of staffs {len(combined)}"
)
return SvgMusicFile(svg_file, width, height, combined)
| 5,499 | Python | .py | 127 | 36.023622 | 100 | 0.626896 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,883 | show_examples_from_index.py | liebharc_homr/training/show_examples_from_index.py | # ruff: noqa: T201
import os
import sys
from typing import Any
import cv2
import numpy as np
from termcolor import colored
index_file_name = sys.argv[1]
number_of_samples_per_iteration = int(sys.argv[2])
index_file = open(index_file_name)
index_lines = index_file.readlines()
index_file.close()
np.random.shuffle(index_lines)
def print_color(text: str, highlights: list[str], color: Any) -> None:
words = text.split()
for word in words:
if any(highlight in word for highlight in highlights):
print(colored(word, color), end=" ")
else:
print(word, end=" ")
print()
while True:
batch = []
for _ in range(number_of_samples_per_iteration):
if len(index_lines) == 0:
break
batch.append(index_lines.pop())
if len(batch) == 0:
break
images = None
print()
print()
print()
print("==========================================")
print()
for line in batch:
image_path, semantic_path = line.strip().split(",")
agnostic_path = semantic_path.replace(".semantic", ".agnostic")
image = cv2.imread(image_path)
with open(semantic_path) as file:
semantic = file.readline().strip().replace("+", " ")
if os.path.exists(agnostic_path):
with open(agnostic_path) as file:
original_agnostic = file.readline().strip().replace("+", " ")
else:
original_agnostic = agnostic_path
if images is None:
images = image
else:
images = np.concatenate((images, image), axis=0)
print()
print(">>> " + image_path)
print_color(semantic, ["barline", "#", "N", "b"], "green")
cv2.imshow("Images", images) # type: ignore
escKey = 27
if cv2.waitKey(0) == escKey:
break
| 1,845 | Python | .py | 57 | 25.701754 | 77 | 0.586382 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,884 | convert_grandstaff.py | liebharc_homr/training/convert_grandstaff.py | import multiprocessing
import os
import platform
import stat
import sys
from pathlib import Path
import cv2
import numpy as np
import PIL
import PIL.Image
from scipy.signal import find_peaks # type: ignore
from torchvision import transforms as tr # type: ignore
from torchvision.transforms import Compose # type: ignore
from homr.download_utils import download_file, untar_file
from homr.simple_logging import eprint
from homr.staff_dewarping import warp_image_randomly
from homr.staff_parsing import add_image_into_tr_omr_canvas
from homr.type_definitions import NDArray
from training.musescore_svg import SvgValidationError
from training.music_xml import MusicXmlValidationError, music_xml_to_semantic
script_location = os.path.dirname(os.path.realpath(__file__))
git_root = Path(script_location).parent.absolute()
dataset_root = os.path.join(git_root, "datasets")
grandstaff_root = os.path.join(dataset_root, "grandstaff")
grandstaff_train_index = os.path.join(grandstaff_root, "index.txt")
hum2xml = os.path.join(dataset_root, "hum2xml")
if platform.system() == "Windows":
eprint("Transformer training is only implemented for Linux")
eprint("Feel free to submit a PR to support Windows")
eprint("The main work should be to download hum2xml.exe and change the calls")
eprint("to use the exe-file instead of the linux binary.")
sys.exit(1)
if not os.path.exists(hum2xml):
eprint("Downloading hum2xml from https://extras.humdrum.org/man/hum2xml/")
download_file("http://extras.humdrum.org/bin/linux/hum2xml", hum2xml)
os.chmod(hum2xml, stat.S_IXUSR)
if not os.path.exists(grandstaff_root):
eprint("Downloading grandstaff from https://sites.google.com/view/multiscore-project/datasets")
grandstaff_archive = os.path.join(dataset_root, "grandstaff.tgz")
download_file("https://grfia.dlsi.ua.es/musicdocs/grandstaff.tgz", grandstaff_archive)
untar_file(grandstaff_archive, grandstaff_root)
def _get_dark_pixels_per_row(image: NDArray) -> NDArray:
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
dark_pixels_per_row = np.zeros(gray.shape[0])
dark_threshold = 200
for i in range(gray.shape[0]):
for j in range(gray.shape[1]):
if gray[i, j] < dark_threshold:
dark_pixels_per_row[i] += 1
return dark_pixels_per_row
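# For reference, the nested loops above are equivalent to this vectorized
# form (shown as a sketch; the loop version is the original implementation):
#
#   dark_pixels_per_row = np.sum(gray < dark_threshold, axis=1)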
def _find_central_valleys(image: NDArray, dark_pixels_per_row: NDArray) -> np.int32 | None:
conv_len = image.shape[0] // 4 + 1
blurred = np.convolve(dark_pixels_per_row, np.ones(conv_len) / conv_len, mode="same")
# Find the central valley
peaks, _ = find_peaks(-blurred, distance=10, prominence=1)
if len(peaks) == 1:
peaks = [peaks[len(peaks) // 2]]
middle = peaks[0]
return np.int32(middle)
return None
def _split_staff_image(path: str, basename: str) -> tuple[str | None, str | None]:
"""
    This algorithm is taken from the `oemer` staffline extraction algorithm. In this
    simplified version it only works with images that have no distortions.
"""
image = cv2.imread(path)
dark_pixels_per_row = _get_dark_pixels_per_row(image)
upper_bound, lower_bound = _get_image_bounds(dark_pixels_per_row)
image = image[upper_bound:-lower_bound]
dark_pixels_per_row = dark_pixels_per_row[upper_bound:-lower_bound]
norm = (dark_pixels_per_row - np.mean(dark_pixels_per_row)) / np.std(dark_pixels_per_row)
centers, _ = find_peaks(norm, height=1.4, distance=3, prominence=1)
lines_per_staff = 5
if len(centers) == lines_per_staff:
upper = _prepare_image(image)
predistorted_path = basename + "_distorted.jpg"
if os.path.exists(predistorted_path):
predistorted_image = cv2.imread(predistorted_path)
single_image = _prepare_image(predistorted_image)
cv2.imwrite(basename + "_single-pre.jpg", single_image)
return distort_image(basename + "_single-pre.jpg"), None
eprint(f"INFO: Couldn't find pre-distorted image {path}, using custom distortions")
cv2.imwrite(basename + "_upper-pre.jpg", upper)
return distort_image(basename + "_upper-pre.jpg"), None
elif len(centers) == 2 * lines_per_staff:
middle = np.int32(np.round((centers[4] + centers[5]) / 2))
else:
central_valley = _find_central_valleys(image, dark_pixels_per_row)
if central_valley is None:
return None, None
middle = central_valley
overlap = 10
if middle < overlap or middle > image.shape[0] - overlap:
eprint(f"INFO: Failed to split {path}, middle is at {middle}")
return None, None
upper = _prepare_image(image[: middle + overlap])
lower = _prepare_image(image[middle - overlap :])
cv2.imwrite(basename + "_upper-pre.jpg", upper)
cv2.imwrite(basename + "_lower-pre.jpg", lower)
return distort_image(basename + "_upper-pre.jpg"), distort_image(basename + "_lower-pre.jpg")
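# Summary of the peak logic above: after normalizing the dark-pixel counts,
# find_peaks reports roughly one peak per staff line, so 5 peaks means a
# single staff (only the upper image is produced) and 10 peaks means a grand
# staff split halfway between the 5th and 6th line; any other count falls
# back to the central-valley search.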
def _prepare_image(image: NDArray) -> NDArray:
result = add_image_into_tr_omr_canvas(image)
return result
def _get_image_bounds(dark_pixels_per_row: NDArray) -> tuple[int, int]:
white_upper_area_size = 0
for i in range(dark_pixels_per_row.shape[0]):
if dark_pixels_per_row[i] > 0:
break
white_upper_area_size += 1
white_lower_area_size = 1
for i in range(dark_pixels_per_row.shape[0] - 1, 0, -1):
if dark_pixels_per_row[i] > 0:
break
white_lower_area_size += 1
return white_upper_area_size, white_lower_area_size
def _check_staff_image(path: str, basename: str) -> tuple[str | None, str | None]:
"""
This method helps with reprocessing a folder more quickly by skipping
the image splitting.
"""
if not os.path.exists(basename + "_upper-pre.jpg"):
return None, None
return basename + "_upper-pre.jpg", basename + "_lower-pre.jpg"
def distort_image(path: str) -> str:
image = PIL.Image.open(path)
image = _add_random_gray_tone(image)
pipeline = Compose(
[
tr.RandomRotation(degrees=1),
tr.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),
tr.RandomAdjustSharpness(2),
]
)
augmented_image = pipeline(img=image)
augmented_image = warp_image_randomly(augmented_image)
augmented_image.save(path)
return path
def _add_random_gray_tone(image: PIL.Image.Image) -> PIL.Image.Image:
image_arr = np.array(image)
random_gray_value = 255 - np.random.randint(0, 50)
mask = np.all(image_arr > random_gray_value, axis=-1)
jitter = np.random.randint(-5, 5, size=mask.shape)
gray = np.clip(random_gray_value + jitter, 0, 255)
image_arr[mask] = gray[mask, None]
return PIL.Image.fromarray(image_arr)
def contains_max_one_clef(semantic: str) -> bool:
"""
    hum2xml sometimes generates invalid MusicXML, which
we can detect by checking for multiple clefs, e.g.
scarlatti-d/keyboard-sonatas/L481K025/min3_up_m-79-82.krn
The issue here is likely that it uses two G2 clefs, and
overlays them on top of each other to indicate
multiple notes at the same time.
"""
return semantic.count("clef-") <= 1
def _music_xml_to_semantic(path: str, basename: str) -> tuple[str | None, str | None]:
result = music_xml_to_semantic(path)
staffs_in_grandstaff = 2
if len(result) != staffs_in_grandstaff:
return None, None
lines = [" ".join(staff) for staff in result]
if not all(contains_max_one_clef(line) for line in lines):
return None, None
with open(basename + "_upper.semantic", "w") as f:
f.write(lines[0])
with open(basename + "_lower.semantic", "w") as f:
f.write(lines[1])
return basename + "_upper.semantic", basename + "_lower.semantic"
def _convert_file( # noqa: PLR0911
path: Path, ony_recreate_semantic_files: bool = False
) -> list[str]:
try:
basename = str(path).replace(".krn", "")
image_file = str(path).replace(".krn", ".jpg")
musicxml = str(path).replace(".krn", ".musicxml")
result = os.system(f"{hum2xml} {path} > {musicxml}") # noqa: S605
if result != 0:
eprint(f"Failed to convert {path}")
return []
upper_semantic, lower_semantic = _music_xml_to_semantic(musicxml, basename)
if upper_semantic is None or lower_semantic is None:
eprint(f"Failed to convert {musicxml}")
return []
if ony_recreate_semantic_files:
upper, lower = _check_staff_image(image_file, basename)
else:
upper, lower = _split_staff_image(image_file, basename)
if upper is None:
return []
if lower is None:
return [
str(Path(upper).relative_to(git_root))
+ ","
+ str(Path(upper_semantic).relative_to(git_root)),
]
return [
str(Path(upper).relative_to(git_root))
+ ","
+ str(Path(upper_semantic).relative_to(git_root)),
str(Path(lower).relative_to(git_root))
+ ","
+ str(Path(lower_semantic).relative_to(git_root)),
]
except (SvgValidationError, MusicXmlValidationError):
return []
except Exception as e:
eprint("Failed to convert ", path, e)
return []
def _convert_file_only_semantic(path: Path) -> list[str]:
return _convert_file(path, True)
def _convert_semantic_and_image(path: Path) -> list[str]:
return _convert_file(path, False)
def convert_grandstaff(only_recreate_semantic_files: bool = False) -> None:
index_file = grandstaff_train_index
if only_recreate_semantic_files:
index_file = os.path.join(grandstaff_root, "index_tmp.txt")
eprint("Indexing Grandstaff dataset, this can up to several hours.")
krn_files = list(Path(grandstaff_root).rglob("*.krn"))
with open(index_file, "w") as f:
file_number = 0
skipped_files = 0
with multiprocessing.Pool() as p:
for result in p.imap_unordered(
(
_convert_file_only_semantic
if only_recreate_semantic_files
else _convert_semantic_and_image
),
krn_files,
):
if len(result) > 0:
for line in result:
f.write(line + "\n")
else:
skipped_files += 1
file_number += 1
if file_number % 1000 == 0:
eprint(
f"Processed {file_number}/{len(krn_files)} files,",
f"skipped {skipped_files} files",
)
eprint("Done indexing")
if __name__ == "__main__":
multiprocessing.set_start_method("spawn")
only_recreate_semantic_files = False
if "--only-semantic" in sys.argv:
only_recreate_semantic_files = True
convert_grandstaff(only_recreate_semantic_files)
| 11,060 | Python | .py | 248 | 37.221774 | 99 | 0.648797 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,885 | run_id.py | liebharc_homr/training/run_id.py | import os
def get_run_id() -> str:
git_count = os.popen("git rev-list --count HEAD").read().strip() # noqa: S605, S607
git_head = os.popen("git rev-parse HEAD").read().strip() # noqa: S605, S607
return f"{git_count}-{git_head}"
| 244 | Python | .py | 5 | 45 | 88 | 0.628692 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,886 | convert_primus.py | liebharc_homr/training/convert_primus.py | import multiprocessing
import os
import random
from collections.abc import Generator
from pathlib import Path
import cv2
from homr.download_utils import download_file, untar_file
from homr.simple_logging import eprint
from homr.staff_parsing import add_image_into_tr_omr_canvas
from training.convert_grandstaff import distort_image
script_location = os.path.dirname(os.path.realpath(__file__))
git_root = Path(script_location).parent.absolute()
dataset_root = os.path.join(git_root, "datasets")
primus = os.path.join(dataset_root, "Corpus")
primus_train_index = os.path.join(primus, "index.txt")
primus_distorted_train_index = os.path.join(primus, "distored_index.txt")
if not os.path.exists(primus):
eprint("Downloading Camera-PrIMuS from https://grfia.dlsi.ua.es/primus/")
primus_archive = os.path.join(dataset_root, "CameraPrIMuS.tgz")
download_file("https://grfia.dlsi.ua.es/primus/packages/CameraPrIMuS.tgz", primus_archive)
untar_file(primus_archive, dataset_root) # the archive contains already a Corpus folder
def _replace_suffix(path: Path, suffix: str) -> Path | None:
suffixes = [".jpg", ".jpeg", ".png"]
if suffix == ".semantic":
suffixes.insert(0, "_distorted.jpg")
for s in suffixes:
if s in str(path):
return Path(str(path).replace(s, suffix))
return None
def _find_semantic_file(path: Path) -> Path | None:
semantic_file = _replace_suffix(path, ".semantic")
if semantic_file is not None and semantic_file.exists():
return semantic_file
return None
def _convert_file(path: Path, distort: bool = False) -> list[str]:
if "-pre.jpg" in str(path):
return []
if "," in str(path):
return []
image = cv2.imread(str(path))
if image is None:
eprint("Warning: Could not read image", path)
return []
margin_top = random.randint(0, 10)
margin_bottom = random.randint(0, 10)
preprocessed = add_image_into_tr_omr_canvas(image, margin_top, margin_bottom)
preprocessed_path = _replace_suffix(path, "-pre.jpg")
if preprocessed_path is None:
eprint("Warning: Unknown extension", path)
return []
cv2.imwrite(str(preprocessed_path.absolute()), preprocessed)
if distort:
distort_image(str(preprocessed_path.absolute()))
semantic_file = _find_semantic_file(path)
if semantic_file is None:
eprint("Warning: No semantic file found for", path)
return []
return [
str(preprocessed_path.relative_to(git_root))
+ ","
+ str(semantic_file.relative_to(git_root))
+ "\n"
]
def _convert_file_without_distortions(path: Path) -> list[str]:
return _convert_file(path)
def _convert_and_distort_file(path: Path) -> list[str]:
return _convert_file(path, True)
def _convert_dataset(
glob_result: Generator[Path, None, None], index_file: str, distort: bool = False
) -> None:
with open(index_file, "w") as f:
file_number = 0
with multiprocessing.Pool(8) as p:
for result in p.imap_unordered(
_convert_and_distort_file if distort else _convert_file_without_distortions,
glob_result,
):
f.writelines(result)
file_number += 1
if file_number % 1000 == 0:
eprint(f"Processed {file_number} files")
def convert_primus_dataset() -> None:
eprint("Indexing PrIMuS dataset")
_convert_dataset(Path(primus).rglob("*.png"), primus_train_index, distort=True)
eprint("Indexing PrIMuS Distorted dataset")
_convert_dataset(Path(primus).rglob("*_distorted.jpg"), primus_distorted_train_index)
eprint("Done indexing")
if __name__ == "__main__":
multiprocessing.set_start_method("spawn")
convert_primus_dataset()
| 3,815 | Python | .py | 90 | 36.411111 | 94 | 0.675304 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,887 | convert_lieder.py | liebharc_homr/training/convert_lieder.py | import json
import multiprocessing
import os
import platform
import random
import shutil
import stat
import sys
from io import BytesIO
from pathlib import Path
import cairosvg # type: ignore
import cv2
import numpy as np
from PIL import Image
from homr.download_utils import download_file, unzip_file
from homr.simple_logging import eprint
from homr.staff_parsing import add_image_into_tr_omr_canvas
from training.convert_grandstaff import distort_image
from training.musescore_svg import SvgMusicFile, get_position_from_multiple_svg_files
from training.music_xml import group_in_measures, music_xml_to_semantic
from training.segmentation.model_utils import write_text_to_file
script_location = os.path.dirname(os.path.realpath(__file__))
git_root = Path(script_location).parent.absolute()
dataset_root = os.path.join(git_root, "datasets")
lieder = os.path.join(dataset_root, "Lieder-main")
lieder_train_index = os.path.join(lieder, "index.txt")
musescore_path = os.path.join(dataset_root, "MuseScore")
if platform.system() == "Windows":
eprint("Transformer training is only implemented for Linux")
eprint("Feel free to submit a PR to support Windows")
eprint("Running MuseScore with the -j parameter on Windows doesn't work")
eprint("https://github.com/musescore/MuseScore/issues/16221")
sys.exit(1)
if not os.path.exists(musescore_path):
eprint("Downloading MuseScore from https://musescore.org/")
download_file(
"https://cdn.jsdelivr.net/musescore/v4.2.1/MuseScore-4.2.1.240230938-x86_64.AppImage",
musescore_path,
)
os.chmod(musescore_path, stat.S_IXUSR)
if not os.path.exists(lieder):
eprint("Downloading Lieder from https://github.com/OpenScore/Lieder")
lieder_archive = os.path.join(dataset_root, "Lieder.zip")
download_file("https://github.com/OpenScore/Lieder/archive/refs/heads/main.zip", lieder_archive)
unzip_file(lieder_archive, dataset_root)
def copy_all_mscx_files(working_dir: str, dest: str) -> None:
for root, _dirs, files in os.walk(working_dir):
for file in files:
if file.endswith(".mscx"):
source = os.path.join(root, file)
shutil.copyfile(source, os.path.join(dest, file))
def create_formats(source_file: str, formats: list[str]) -> list[dict[str, str]]:
jobs: list[dict[str, str]] = []
# List of files where MuseScore seems to hang up
files_with_known_issues = [
"lc6264558",
"lc5712131",
"lc5995407",
"lc5712146",
"lc6001354",
"lc6248307",
"lc5935864",
]
if any(issue in source_file for issue in files_with_known_issues):
return jobs
for target_format in formats:
dirname = os.path.dirname(source_file)
basename = os.path.basename(source_file)
out_name = dirname + "/" + basename.replace(".mscx", f".{target_format}")
out_name_alt = dirname + "/" + basename.replace(".mscx", f"-1.{target_format}")
if os.path.exists(out_name) or os.path.exists(out_name_alt):
eprint(out_name, "already exists")
continue
job = {
"in": source_file,
"out": out_name,
}
jobs.append(job)
return jobs
def _create_musicxml_and_svg_files() -> None:
dest = os.path.join(lieder, "flat")
os.makedirs(dest, exist_ok=True)
copy_all_mscx_files(os.path.join(lieder, "scores"), dest)
mscx_files = list(Path(dest).rglob("*.mscx"))
MuseScore = os.path.join(dataset_root, "MuseScore")
all_jobs = []
for file in mscx_files:
jobs = create_formats(str(file), ["musicxml", "svg"])
all_jobs.extend(jobs)
if len(all_jobs) == 0:
eprint("All musicxml were already created, going on with the next step")
return
with open("job.json", "w") as f:
json.dump(all_jobs, f)
eprint("Starting with", len(all_jobs), "jobs, with the first being", all_jobs[0])
if os.system(MuseScore + " --force -j job.json") != 0: # noqa: S605
eprint("Error running MuseScore")
os.remove("job.json")
sys.exit(1)
os.remove("job.json")
def _split_file_into_staffs(
semantic: list[list[str]], svg_files: list[SvgMusicFile], just_semantic_files: bool
) -> list[str]:
voice = 0
measures = [group_in_measures(voice) for voice in semantic]
result: list[str] = []
for svg_file in svg_files:
png_file = svg_file.filename.replace(".svg", ".png")
if not just_semantic_files:
target_width = 1400
scale = target_width / svg_file.width
png_data = cairosvg.svg2png(url=svg_file.filename, scale=scale)
pil_img = Image.open(BytesIO(png_data))
image = np.array(pil_img.convert("RGB"))[:, :, ::-1].copy()
for staff_number, staff in enumerate(svg_file.staffs):
staff_image_file_name = png_file.replace(".png", f"-{staff_number}.png")
if not just_semantic_files:
y_offset = staff.height
x_offset = 50
x = staff.x - x_offset
y = staff.y - y_offset
width = staff.width + 2 * x_offset
height = staff.height + 2 * y_offset
x = int(x * scale)
y = int(y * scale)
width = int(width * scale)
height = int(height * scale)
staff_image = image[y : y + height, x : x + width]
margin_top = random.randint(0, 10)
margin_bottom = random.randint(0, 10)
preprocessed = add_image_into_tr_omr_canvas(staff_image, margin_top, margin_bottom)
cv2.imwrite(staff_image_file_name, preprocessed)
staff_image_file_name = distort_image(staff_image_file_name)
elif not os.path.exists(staff_image_file_name):
raise ValueError(f"File {staff_image_file_name} not found")
semantic_file_name = png_file.replace(".png", f"-{staff_number}.semantic")
prelude = measures[voice][0]
selected_measures: list[str] = []
for _ in range(staff.number_of_measures):
selected_measures.append(str.join("+", measures[voice][1].pop(0)))
semantic_content = str.join("+", selected_measures) + "\n"
if not semantic_content.startswith("clef"):
semantic_content = prelude + semantic_content
write_text_to_file(semantic_content, semantic_file_name)
result.append(staff_image_file_name + "," + semantic_file_name + "\n")
voice = (voice + 1) % len(semantic)
if any(len(measure[1]) > 0 for measure in measures):
raise ValueError("Warning: Not all measures were processed")
return result
def _contains_max_one_clef(semantic: list[str]) -> bool:
return sum(1 for s in semantic if s.startswith("clef")) <= 1
def _convert_file(file: Path, just_semantic_files: bool) -> list[str]:
try:
semantic = music_xml_to_semantic(str(file))
number_of_voices = len(semantic)
number_of_measures = semantic[0].count("barline")
if not all(_contains_max_one_clef(s) for s in semantic):
eprint(file, "contains more than one clef")
return []
svg_files = get_position_from_multiple_svg_files(str(file))
measures_in_svg = [sum(s.number_of_measures for s in file.staffs) for file in svg_files]
sum_of_measures_in_xml = number_of_measures * number_of_voices
if sum(measures_in_svg) != sum_of_measures_in_xml:
eprint(
file,
"INFO: Number of measures in SVG files",
sum(measures_in_svg),
"does not match number of measures in XML",
sum_of_measures_in_xml,
)
return []
return _split_file_into_staffs(semantic, svg_files, just_semantic_files)
except Exception as e:
eprint("Error while processing", file, e)
return []
def _convert_file_only_semantic(path: Path) -> list[str]:
return _convert_file(path, True)
def _convert_semantic_and_image(path: Path) -> list[str]:
return _convert_file(path, False)
def convert_lieder(only_recreate_semantic_files: bool = False) -> None:
eprint("Indexing Lieder dataset, this can up to several hours.")
_create_musicxml_and_svg_files()
music_xml_files = list(Path(os.path.join(lieder, "flat")).rglob("*.musicxml"))
with open(lieder_train_index, "w") as f:
file_number = 0
skipped_files = 0
with multiprocessing.Pool() as p:
for result in p.imap_unordered(
(
_convert_file_only_semantic
if only_recreate_semantic_files
else _convert_semantic_and_image
),
music_xml_files,
):
if len(result) > 0:
for line in result:
f.write(line)
f.flush()
else:
skipped_files += 1
file_number += 1
if file_number % 10 == 0:
eprint(
f"Processed {file_number}/{len(music_xml_files)} files,",
f"skipped {skipped_files} files",
)
eprint("Done indexing")
if __name__ == "__main__":
multiprocessing.set_start_method("spawn")
only_recreate_semantic_files = False
if "--only-semantic" in sys.argv:
only_recreate_semantic_files = True
convert_lieder(only_recreate_semantic_files)
| 9,664 | Python | .py | 214 | 36.060748 | 100 | 0.6176 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,888 | mix_datasets.py | liebharc_homr/training/transformer/mix_datasets.py | from typing import Any
import numpy as np
def _calc_number_of_files_to_take(
data_sources: list[dict[str, Any]], number_of_files: int
) -> list[int]:
files_to_take = [0 for _ in data_sources]
while number_of_files > 0:
total_weight = sum([s["weight"] for s in data_sources])
number_of_files_per_source = [
int(number_of_files * s["weight"] / total_weight) for s in data_sources
]
number_of_files_per_source[-1] = number_of_files - sum(number_of_files_per_source[:-1])
        max_available_ratios = []
        for i, source in enumerate(data_sources):
            if number_of_files_per_source[i] <= 0:
                # A source allotted zero files this round cannot limit the ratio.
                max_available_ratios.append(1)
                continue
            max_available_ratios.append(min(source["len"] / number_of_files_per_source[i], 1))
        limiting_ratio = min(max_available_ratios)
number_of_files_per_source = [int(limiting_ratio * n) for n in number_of_files_per_source]
for i, n in enumerate(number_of_files_per_source):
number_of_files -= n
ds_id = data_sources[i]["id"]
files_to_take[ds_id] += n
data_sources[i]["len"] -= n
data_sources = [s for s in data_sources if s["len"] > 0]
return files_to_take
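# A concrete sketch of the allocation above (illustrative only, not used by
# the pipeline): two sources of sizes 100 and 10 with equal weights and 40
# requested files yield [30, 10] -- the small source is exhausted in the
# first round and the remainder is redistributed to the larger one.
def _allocation_demo() -> list[int]:
    sources = [
        {"len": 100, "weight": 1.0, "id": 0},
        {"len": 10, "weight": 1.0, "id": 1},
    ]
    return _calc_number_of_files_to_take(sources, 40)  # -> [30, 10]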
def _take_all_training_sets(indexes: list[list[str]]) -> list[str]:
train_index = []
for index in indexes:
train_index += index
np.random.shuffle(train_index)
return train_index
def mix_training_sets(
data_sources: list[list[str]], weights: list[float], number_of_files: int
) -> list[str]:
# We want the training and validation sets to be the same for each run
# if the input hasn't changed and therefore set the seed here.
np.random.seed(1720697007)
for data_source in data_sources:
np.random.shuffle(data_source)
if number_of_files < 0:
return _take_all_training_sets(data_sources)
number_of_files_per_index = _calc_number_of_files_to_take(
[
{"len": len(index), "weight": weights[i], "id": i}
for i, index in enumerate(data_sources)
],
number_of_files,
)
mixed_source = []
for i, data_source in enumerate(data_sources):
mixed_source += data_source[: number_of_files_per_index[i]]
np.random.shuffle(mixed_source)
return mixed_source
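# Minimal usage sketch of mix_training_sets; the index entries below are
# hypothetical placeholders for "image,semantic" lines.
def _mix_demo() -> None:
    primus = [f"primus_{i}.png,primus_{i}.semantic" for i in range(100)]
    lieder = [f"lieder_{i}.png,lieder_{i}.semantic" for i in range(50)]
    mixed = mix_training_sets([primus, lieder], [1.0, 2.0], 60)
    # With twice the weight, lieder contributes two thirds of the mix,
    # capped by its actual size.
    print(len(mixed))  # -> 60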
| 2,246 | Python | .py | 52 | 35.980769 | 98 | 0.632158 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,889 | train.py | liebharc_homr/training/transformer/train.py | import os
import shutil
import sys
import safetensors
import torch
import torch._dynamo
from transformers import Trainer, TrainingArguments # type: ignore
from homr.simple_logging import eprint
from homr.transformer.configs import Config
from homr.transformer.tromr_arch import TrOMR
from training.convert_grandstaff import convert_grandstaff, grandstaff_train_index
from training.convert_lieder import convert_lieder, lieder_train_index
from training.convert_primus import (
convert_primus_dataset,
primus_distorted_train_index,
primus_train_index,
)
from training.run_id import get_run_id
from training.transformer.data_loader import load_dataset
from training.transformer.mix_datasets import mix_training_sets
torch._dynamo.config.suppress_errors = True
def load_training_index(file_path: str) -> list[str]:
with open(file_path) as f:
return f.readlines()
def contains_supported_clef(semantic: str) -> bool:
if semantic.count("clef-") != 1:
return False
return "clef-G2" in semantic or "clef-F4" in semantic
def filter_for_clefs(file_paths: list[str]) -> list[str]:
result = []
for entry in file_paths:
semantic = entry.strip().split(",")[1]
if semantic == "nosymbols":
continue
with open(semantic) as f:
lines = f.readlines()
if all(contains_supported_clef(line) for line in lines):
result.append(entry)
return result
def check_data_source(all_file_paths: list[str]) -> bool:
result = True
for file_paths in all_file_paths:
paths = file_paths.strip().split(",")
for path in paths:
if path == "nosymbols":
continue
if not os.path.exists(path):
eprint(f"Index {file_paths} does not exist due to {path}")
result = False
return result
def load_and_mix_training_sets(
index_paths: list[str], weights: list[float], number_of_files: int
) -> list[str]:
if len(index_paths) != len(weights):
eprint("Error: Number of index paths and weights do not match")
sys.exit(1)
data_sources = [load_training_index(index) for index in index_paths]
if not all(check_data_source(data) for data in data_sources):
eprint("Error in datasets found")
sys.exit(1)
data_sources = [filter_for_clefs(data) for data in data_sources]
eprint(
"Total number of training files to choose from", sum([len(data) for data in data_sources])
)
return mix_training_sets(data_sources, weights, number_of_files)
script_location = os.path.dirname(os.path.realpath(__file__))
vocabulary = os.path.join(script_location, "vocabulary_semantic.txt")
git_root = os.path.join(script_location, "..", "..")
def _check_datasets_are_present() -> None:
if not os.path.exists(primus_train_index) or not os.path.exists(primus_distorted_train_index):
convert_primus_dataset()
if not os.path.exists(grandstaff_train_index):
convert_grandstaff()
if not os.path.exists(lieder_train_index):
convert_lieder()
def train_transformer(fp32: bool = False, pretrained: bool = False, resume: str = "") -> None:
number_of_files = -1
number_of_epochs = 30
resume_from_checkpoint = None
checkpoint_folder = "current_training"
if resume:
resume_from_checkpoint = os.path.join(git_root, checkpoint_folder, resume)
elif os.path.exists(os.path.join(git_root, checkpoint_folder)):
shutil.rmtree(os.path.join(git_root, checkpoint_folder))
_check_datasets_are_present()
train_index = load_and_mix_training_sets(
[primus_train_index, grandstaff_train_index, lieder_train_index],
[1.0, 1.0, 1.0],
number_of_files,
)
config = Config()
datasets = load_dataset(train_index, config, val_split=0.1)
compile_threshold = 50000
compile_model = (
number_of_files < 0 or number_of_files * number_of_epochs >= compile_threshold
) # Compiling needs time, but pays off for large datasets
if compile_model:
eprint("Compiling model")
run_id = get_run_id()
train_args = TrainingArguments(
checkpoint_folder,
torch_compile=compile_model,
overwrite_output_dir=True,
evaluation_strategy="epoch",
# TrOMR Paper page 3 specifies a rate of 1e-3, but that can cause issues with fp16 mode
learning_rate=1e-4,
optim="adamw_torch", # TrOMR Paper page 3 species an Adam optimizer
per_device_train_batch_size=16, # TrOMR Paper page 3
per_device_eval_batch_size=8,
num_train_epochs=number_of_epochs,
weight_decay=0.01,
load_best_model_at_end=False,
metric_for_best_model="loss",
logging_dir=os.path.join("logs", f"run{run_id}"),
save_strategy="epoch",
label_names=["rhythms_seq", "note_seq", "lifts_seq", "pitchs_seq"],
fp16=not fp32,
dataloader_pin_memory=True,
dataloader_num_workers=12,
)
if pretrained:
eprint("Loading pretrained model")
model = TrOMR(config)
checkpoint_file_path = config.filepaths.checkpoint
if ".safetensors" in checkpoint_file_path:
tensors = {}
with safetensors.safe_open(checkpoint_file_path, framework="pt", device=0) as f: # type: ignore
for k in f.keys():
tensors[k] = f.get_tensor(k)
model.load_state_dict(tensors, strict=False)
else:
model.load_state_dict(torch.load(checkpoint_file_path), strict=False)
else:
model = TrOMR(config)
try:
trainer = Trainer(
model,
train_args,
train_dataset=datasets["train"],
eval_dataset=datasets["validation"],
)
trainer.train(resume_from_checkpoint=resume_from_checkpoint)
except KeyboardInterrupt:
eprint("Interrupted")
model_destination = os.path.join(git_root, "homr", "transformer", f"pytorch_model_{run_id}.pth")
torch.save(model.state_dict(), model_destination)
eprint(f"Saved model to {model_destination}")
| 6,165 | Python | .py | 147 | 34.891156 | 108 | 0.666834 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,890 | data_loader.py | liebharc_homr/training/transformer/data_loader.py | import os
import random
from typing import Any
import numpy as np
from homr.simple_logging import eprint
from homr.transformer.configs import Config
from homr.transformer.decoder import tokenize
from homr.transformer.split_merge_symbols import split_semantic_file
from homr.transformer.staff2score import readimg
from homr.type_definitions import NDArray
script_location = os.path.dirname(os.path.realpath(__file__))
os.environ["WANDB_DISABLED"] = "true"
git_root = os.path.join(script_location, "..", "..")
class DataLoader:
"""
Dataset class for the CTC PriMuS dataset and all datasets which
have been preprocessed to have the same format.
The format is an image file and a semantic file. The semantic file
contains the ground truth.
"""
gt_element_separator = "-"
PAD_COLUMN = 0
validation_dict = None
def __init__(
self,
corpus_list: list[str],
rhythm_vocab: Any,
pitch_vocab: Any,
note_vocab: Any,
lift_vocab: Any,
config: Config,
) -> None:
self.current_idx = 0
self.corpus_list = self._add_mask_steps(corpus_list)
self.rhythm_vocab = rhythm_vocab
self.pitch_vocab = pitch_vocab
self.note_vocab = note_vocab
self.lift_vocab = lift_vocab
self.config = config
def _add_mask_steps(self, corpus_list: list[str]) -> Any:
result = []
for entry in corpus_list:
image, semantic_file = entry.strip().split(",")
semantic = self._read_semantic(semantic_file)
lifts = semantic[0][0]
semantic_len = len(lifts)
            # If we had the budget for it we would want to use:
            # mask_lens = range(1, semantic_len)
            # Instead we take up to five random mask lengths plus the full length
mask_lens = set()
mask_lens.add(semantic_len + 2)
number_of_desired_samples = 6
for _ in range(1, min(number_of_desired_samples, semantic_len)):
mask_lens.add(random.randint(1, semantic_len) + 1)
# mask_lens = range(2, semantic_len + 2) # + 2 for the BOS and EOS token
for mask_len in mask_lens:
result.append({"image": image, "semantic": semantic_file, "mask_len": mask_len})
return result
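    @staticmethod
    def _mask_len_demo(semantic_len: int = 40) -> set[int]:
        """A sketch of the sampling above, not used by training: one entry
        always covers the full sequence (+2 for BOS/EOS) and up to five
        more entries receive random prefix lengths."""
        mask_lens = {semantic_len + 2}
        for _ in range(1, min(6, semantic_len)):
            mask_lens.add(random.randint(1, semantic_len) + 1)
        return mask_lens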
def __len__(self) -> int:
return len(self.corpus_list)
def _limit_samples(self, samples: list[int]) -> list[int]:
if len(samples) > self.config.max_seq_len - 2:
samples = samples[: self.config.max_seq_len - 2]
return samples
def _pad_array_to_max_seq_len(self, samples: list[int]) -> NDArray:
samples_padded = np.ones(self.config.max_seq_len, dtype=np.int64) * self.PAD_COLUMN
valid_len = min(self.config.max_seq_len, len(samples))
samples_padded[:valid_len] = np.array(samples[:valid_len])
return samples_padded
def _pad_rhythm(self, samples: list[int]) -> list[int]:
samples = self._limit_samples(samples)
samples.append(self.config.eos_token)
samples.insert(0, self.config.bos_token)
return samples
def _pad_samples(self, samples: list[int]) -> list[int]:
samples = self._limit_samples(samples)
samples.append(self.config.nonote_token)
samples.insert(0, self.config.nonote_token)
return samples
def _check_seq_values(self, seq: list[int], max_value: int) -> list[int]:
for value in seq:
if value >= max_value or value < 0:
raise Exception(
"ERROR: " + str(value) + " not in range of 0 to " + str(max_value) + "!"
)
return seq
def _check_index(self, idx: int) -> bool:
try:
self[idx]
return True
except Exception as e:
eprint("ERROR: " + str(e))
return False
def check(self) -> bool:
"""
Loads every entry to check if the files are available
and can be loaded correctly.
"""
has_errors = False
for i in range(len(self)):
result = self._check_index(i)
has_errors = has_errors or not result
if i % 10000 == 0:
eprint("Checked " + str(i) + "/" + str(len(self)) + " entries")
return has_errors
def _read_semantic(
self, path: str
) -> tuple[list[list[str]], list[list[str]], list[list[str]], list[list[str]]]:
if path == "nosymbols":
return [[]], [[]], [[]], [[]]
return split_semantic_file(path)
def __getitem__(self, idx: int) -> Any:
entry = self.corpus_list[idx]
sample_filepath = entry["image"]
sample_img = readimg(self.config, os.path.join(git_root, sample_filepath))
# ground truth
sample_full_filepath = entry["semantic"]
liftsymbols, pitchsymbols, rhythmsymbols, note_symbols = self._read_semantic(
sample_full_filepath
)
rhythm = tokenize(
rhythmsymbols[0],
self.rhythm_vocab,
self.config.pad_token,
"rhythm",
sample_full_filepath,
)
lifts = tokenize(
liftsymbols[0], self.lift_vocab, self.config.nonote_token, "lift", sample_full_filepath
)
pitch = tokenize(
pitchsymbols[0],
self.pitch_vocab,
self.config.nonote_token,
"pitch",
sample_full_filepath,
)
notes = tokenize(
note_symbols[0], self.note_vocab, self.config.nonote_token, "note", sample_full_filepath
)
rhythm_seq = self._check_seq_values(self._pad_rhythm(rhythm), self.config.num_rhythm_tokens)
mask = np.zeros(self.config.max_seq_len).astype(np.bool_)
mask[: entry["mask_len"]] = 1
result = {
"inputs": sample_img,
"mask": mask,
"rhythms_seq": self._pad_array_to_max_seq_len(rhythm_seq),
"note_seq": self._pad_array_to_max_seq_len(
self._check_seq_values(self._pad_samples(notes), self.config.num_note_tokens)
),
"lifts_seq": self._pad_array_to_max_seq_len(
self._check_seq_values(self._pad_samples(lifts), self.config.num_lift_tokens)
),
"pitchs_seq": self._pad_array_to_max_seq_len(
self._check_seq_values(self._pad_samples(pitch), self.config.num_pitch_tokens)
),
}
return result
def load_dataset(samples: list[str], config: Config, val_split: float = 0.0) -> dict[str, Any]:
rhythm_tokenizer_vocab = config.rhythm_vocab
pitch_tokenizer_vocab = config.pitch_vocab
note_tokenizer_vocab = config.note_vocab
lift_tokenizer_vocab = config.lift_vocab
# Train and validation split
val_idx = int(len(samples) * val_split)
training_list = samples[val_idx:]
validation_list = samples[:val_idx]
eprint(
"Training with "
+ str(len(training_list))
+ " and validating with "
+ str(len(validation_list))
)
return {
"train": DataLoader(
training_list,
rhythm_tokenizer_vocab,
pitch_tokenizer_vocab,
note_tokenizer_vocab,
lift_tokenizer_vocab,
config,
),
"train_list": training_list,
"validation": DataLoader(
validation_list,
rhythm_tokenizer_vocab,
pitch_tokenizer_vocab,
note_tokenizer_vocab,
lift_tokenizer_vocab,
config,
),
"validation_list": validation_list,
}
| 7,711 | Python | .py | 193 | 30.569948 | 100 | 0.591747 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,891 | constant_min.py | liebharc_homr/training/segmentation/constant_min.py | from training.segmentation.dense_dataset_definitions import (
DENSE_DATASET_DEFINITIONS as DEF,
)
CLASS_CHANNEL_LIST = [
DEF.STEM + DEF.ALL_RESTS_EXCEPT_LARGE + DEF.BARLINE_BETWEEN + DEF.BARLINE_END,
DEF.NOTEHEADS_ALL,
DEF.ALL_CLEFS + DEF.ALL_KEYS + DEF.ALL_ACCIDENTALS,
]
CLASS_CHANNEL_MAP = {
color: idx + 1 for idx, colors in enumerate(CLASS_CHANNEL_LIST) for color in colors
}
CHANNEL_NUM = len(CLASS_CHANNEL_LIST) + 1 # Plus 'background' and 'others' channel.
| 489 | Python | .py | 12 | 37.833333 | 87 | 0.734177 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,892 | model_utils.py | liebharc_homr/training/segmentation/model_utils.py | import json
import os
from typing import Any
from training.segmentation.types import Model
def save_model(model: Model, metadata: dict[str, Any], model_path: str) -> None:
"""Save model and metadata"""
model.export(model_path) # Creates a folder with the model, we now add metadata
write_text_to_file(
model.to_json(), os.path.join(model_path, "arch.json")
) # Save model architecture for documentation
write_text_to_file(json.dumps(metadata), os.path.join(model_path, "meta.json"))
def load_model(model_path: str) -> tuple[Model, dict[str, Any]]:
"""Load model and metadata"""
import tensorflow as tf
model = tf.saved_model.load(model_path)
with open(os.path.join(model_path, "meta.json")) as f:
metadata = json.loads(f.read())
return model, metadata
def write_text_to_file(text: str, path: str) -> None:
with open(path, "w") as f:
f.write(text)
| 925 | Python | .py | 21 | 39.428571 | 84 | 0.690848 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,893 | build_label.py | liebharc_homr/training/segmentation/build_label.py | import os
import random
import sys
import cv2
import numpy as np
from PIL import Image
from homr.simple_logging import eprint
from homr.type_definitions import NDArray
from training.segmentation.constant_min import CHANNEL_NUM, CLASS_CHANNEL_MAP
from training.segmentation.dense_dataset_definitions import (
DENSE_DATASET_DEFINITIONS as DEF,
)
HALF_WHOLE_NOTE = DEF.NOTEHEADS_HOLLOW + DEF.NOTEHEADS_WHOLE + [42]
# ruff: noqa: C901, PLR0912
def fill_hole(gt: NDArray, tar_color: int) -> NDArray:
if tar_color not in HALF_WHOLE_NOTE:
raise ValueError("The color is not a notehead color")
tar = np.where(gt == tar_color, 1, 0).astype(np.uint8)
cnts, _ = cv2.findContours(tar, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for cnt in cnts:
x, y, w, h = cv2.boundingRect(cnt)
# Scan by row
for yi in range(y, y + h):
cur = x
cand_y = []
cand_x = []
while cur <= x + w:
if tar[yi, cur] > 0:
break
cur += 1
while cur <= x + w:
if tar[yi, cur] == 0:
break
cur += 1
while cur <= x + w:
if tar[yi, cur] > 0:
break
cand_y.append(yi)
cand_x.append(cur)
cur += 1
if cur <= x + w:
tar[np.array(cand_y), np.array(cand_x)] = 1
# Scan by column
for xi in range(x, x + w):
cur = y
cand_y = []
cand_x = []
while cur <= y + h:
if tar[cur, xi] > 0:
break
cur += 1
while cur <= y + h:
if tar[cur, xi] == 0:
break
cur += 1
while cur <= y + h:
if tar[cur, xi] > 0:
break
cand_y.append(cur)
cand_x.append(xi)
cur += 1
if cur <= y + h:
tar[np.array(cand_y), np.array(cand_x)] = 1
return tar
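# A minimal sketch of fill_hole on synthetic data: a hollow notehead drawn
# as a ring of the target color gets its enclosed background filled, so
# hollow and solid noteheads end up with comparable masks.
def _fill_hole_demo() -> NDArray:
    gt = np.zeros((7, 7), dtype=np.uint8)
    color = HALF_WHOLE_NOTE[0]
    gt[1:6, 1:6] = color  # 5x5 block of the notehead color
    gt[2:5, 2:5] = 0  # hollow it out, leaving a 3x3 hole
    # The returned binary mask covers the ring *and* the filled hole.
    return fill_hole(gt, color)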
def build_label(
seg_path: str, strenghten_channels: dict[int, tuple[int, int]] | None = None
) -> NDArray:
img = Image.open(seg_path)
arr = np.array(img)
color_set = set(np.unique(arr))
color_set.remove(0) # Remove background color from the candidates
total_chs = CHANNEL_NUM
output = np.zeros(arr.shape + (total_chs,))
output[..., 0] = np.where(arr == 0, 1, 0)
for color in color_set:
ch = CLASS_CHANNEL_MAP.get(color, 0)
if (ch != 0) and color in HALF_WHOLE_NOTE:
note = fill_hole(arr, color)
output[..., ch] += note
elif ch != 0:
output[..., ch] += np.where(arr == color, 1, 0)
if strenghten_channels is not None:
for ch in strenghten_channels.keys():
output[..., ch] = make_symbols_stronger(output[..., ch], strenghten_channels[ch])
# The background channel is 1 if all other channels are 0
background_ch = np.ones((arr.shape[0], arr.shape[1]))
for ch in range(1, total_chs):
background_ch = np.where(output[..., ch] == 1, 0, background_ch)
output[..., 0] = background_ch
return output
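# Minimal usage sketch: report which channels are populated for one
# segmentation image (the path below is a hypothetical placeholder).
def _build_label_demo(seg_path: str = "segmentation/score_0_seg.png") -> None:
    label = build_label(seg_path)
    # Channel 0 is background; the rest follow CLASS_CHANNEL_LIST.
    for ch in range(label.shape[-1]):
        eprint("channel", ch, "pixels:", int(label[..., ch].sum()))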
def close_lines(img: cv2.typing.MatLike) -> cv2.typing.MatLike:
# Use hough transform to find lines
width = img.shape[1]
lines = cv2.HoughLinesP(
img, 1, np.pi / 180, threshold=width // 32, minLineLength=width // 16, maxLineGap=50
)
if lines is not None:
angles = []
# Draw lines
for line in lines:
x1, y1, x2, y2 = line[0]
angle = np.arctan2(y2 - y1, x2 - x1)
angles.append(angle)
mean_angle = np.mean(angles)
# Draw lines
for line in lines:
x1, y1, x2, y2 = line[0]
angle = np.arctan2(y2 - y1, x2 - x1)
is_horizontal = abs(angle - mean_angle) < np.pi / 16
if is_horizontal:
cv2.line(img, (int(x1), int(y1)), (int(x2), int(y2)), 255, 1) # type: ignore
else:
eprint("No lines found")
return img
def make_symbols_stronger(img: NDArray, kernel_size: tuple[int, int] = (5, 5)) -> NDArray:
"""
Dilates the symbols to make them stronger
"""
kernel = np.ones(kernel_size, np.uint8)
return cv2.dilate(img, kernel, iterations=1)
def find_example(
dataset_path: str, color: int, max_count: int = 100, mark_value: int = 200
) -> NDArray | None:
files = os.listdir(dataset_path)
random.shuffle(files)
for ff in files[:max_count]:
path = os.path.join(dataset_path, ff)
img = Image.open(path)
arr = np.array(img)
if color in arr:
return np.where(arr == color, mark_value, arr)
return None
if __name__ == "__main__":
from pathlib import Path
script_location = os.path.dirname(os.path.realpath(__file__))
git_root = Path(script_location).parent.parent.absolute()
dataset_root = os.path.join(git_root, "datasets")
seg_folder = os.path.join(dataset_root, "ds2_dense", "segmentation")
color = int(sys.argv[1])
with_background = find_example(seg_folder, color)
if with_background is None:
eprint("Found no examples")
else:
cv2.imwrite("example.png", 255 * with_background)
| 5,373 | Python | .py | 144 | 28.208333 | 93 | 0.552055 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,894 | unet.py | liebharc_homr/training/segmentation/unet.py | from typing import cast
import tensorflow as tf
import tensorflow.keras.layers as L
from tensorflow.keras import Input
from tensorflow.keras.layers import (
Activation,
Add,
Concatenate,
Conv2D,
Conv2DTranspose,
Dropout,
LayerNormalization,
)
from training.segmentation.types import Model
def conv_block(
input_tensor: tf.Tensor,
channel: int,
kernel_size: tuple[int, int],
strides: tuple[int, int] = (2, 2),
dilation_rate: int = 1,
dropout_rate: float = 0.4,
) -> tf.Tensor:
"""Convolutional encoder block of U-net.
The block is a fully convolutional block. The encoder block does not
downsample the input feature, and thus the output will have the same
dimension as the input.
"""
skip = input_tensor
input_tensor = LayerNormalization()(Activation("relu")(input_tensor))
input_tensor = Dropout(dropout_rate)(input_tensor)
input_tensor = Conv2D(
channel, kernel_size, strides=strides, dilation_rate=dilation_rate, padding="same"
)(input_tensor)
input_tensor = LayerNormalization()(Activation("relu")(input_tensor))
input_tensor = Dropout(dropout_rate)(input_tensor)
input_tensor = Conv2D(
channel, kernel_size, strides=(1, 1), dilation_rate=dilation_rate, padding="same"
)(input_tensor)
if strides != (1, 1):
skip = Conv2D(channel, (1, 1), strides=strides, padding="same")(skip)
input_tensor = Add()([input_tensor, skip])
return input_tensor
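# A small shape check for the encoder block (illustrative only): strides of
# (2, 2) halve the spatial dimensions while the skip path is projected with
# a 1x1 convolution so the residual Add() lines up.
def _conv_block_shape_demo() -> None:
    inp = Input(shape=(64, 64, 3))
    out = conv_block(inp, 32, (3, 3), strides=(2, 2))
    print(out.shape)  # -> (None, 32, 32, 32)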
def transpose_conv_block(
input_tensor: tf.Tensor,
channel: int,
kernel_size: tuple[int, int],
strides: tuple[int, int] = (2, 2),
dropout_rate: float = 0.4,
) -> tf.Tensor:
skip = input_tensor
input_tensor = LayerNormalization()(Activation("relu")(input_tensor))
input_tensor = Dropout(dropout_rate)(input_tensor)
input_tensor = Conv2D(channel, kernel_size, strides=(1, 1), padding="same")(input_tensor)
input_tensor = LayerNormalization()(Activation("relu")(input_tensor))
input_tensor = Dropout(dropout_rate)(input_tensor)
input_tensor = Conv2DTranspose(channel, kernel_size, strides=strides, padding="same")(
input_tensor
)
if strides != (1, 1):
skip = Conv2DTranspose(channel, (1, 1), strides=strides, padding="same")(skip)
input_tensor = Add()([input_tensor, skip])
return input_tensor
# ruff: noqa: PLR0915
def semantic_segmentation(
win_size: int = 256,
multi_grid_layer_n: int = 1,
multi_grid_n: int = 5,
out_class: int = 2,
dropout: float = 0.4,
) -> Model:
"""Improved U-net model with Atrous Spatial Pyramid Pooling (ASPP) block."""
input_score = Input(shape=(win_size, win_size, 3), name="input_score_48")
en = Conv2D(2**7, (7, 7), strides=(1, 1), padding="same")(input_score)
en_l1 = conv_block(en, 2**7, (3, 3), strides=(2, 2))
en_l1 = conv_block(en_l1, 2**7, (3, 3), strides=(1, 1))
en_l2 = conv_block(en_l1, 2**7, (3, 3), strides=(2, 2))
en_l2 = conv_block(en_l2, 2**7, (3, 3), strides=(1, 1))
en_l2 = conv_block(en_l2, 2**7, (3, 3), strides=(1, 1))
en_l3 = conv_block(en_l2, 2**7, (3, 3), strides=(2, 2))
en_l3 = conv_block(en_l3, 2**7, (3, 3), strides=(1, 1))
en_l3 = conv_block(en_l3, 2**7, (3, 3), strides=(1, 1))
en_l3 = conv_block(en_l3, 2**7, (3, 3), strides=(1, 1))
en_l4 = conv_block(en_l3, 2**8, (3, 3), strides=(2, 2))
en_l4 = conv_block(en_l4, 2**8, (3, 3), strides=(1, 1))
en_l4 = conv_block(en_l4, 2**8, (3, 3), strides=(1, 1))
en_l4 = conv_block(en_l4, 2**8, (3, 3), strides=(1, 1))
en_l4 = conv_block(en_l4, 2**8, (3, 3), strides=(1, 1))
feature = en_l4
for _ in range(multi_grid_layer_n):
feature = LayerNormalization()(Activation("relu")(feature))
feature = Dropout(dropout)(feature)
m = LayerNormalization()(
Conv2D(2**9, (1, 1), strides=(1, 1), padding="same", activation="relu")(feature)
)
multi_grid = m
for ii in range(multi_grid_n):
m = LayerNormalization()(
Conv2D(
2**9,
(3, 3),
strides=(1, 1),
dilation_rate=2**ii,
padding="same",
activation="relu",
)(feature)
)
multi_grid = Concatenate()([multi_grid, m])
multi_grid = Dropout(dropout)(multi_grid)
feature = Conv2D(2**9, (1, 1), strides=(1, 1), padding="same")(multi_grid)
feature = LayerNormalization()(Activation("relu")(feature))
feature = Conv2D(2**8, (1, 1), strides=(1, 1), padding="same")(feature)
feature = Add()([feature, en_l4])
de_l1 = transpose_conv_block(feature, 2**7, (3, 3), strides=(2, 2))
skip = de_l1
de_l1 = LayerNormalization()(Activation("relu")(de_l1))
de_l1 = Concatenate()([de_l1, LayerNormalization()(Activation("relu")(en_l3))])
de_l1 = Dropout(dropout)(de_l1)
de_l1 = Conv2D(2**7, (1, 1), strides=(1, 1), padding="same")(de_l1)
de_l1 = Add()([de_l1, skip])
de_l2 = transpose_conv_block(de_l1, 2**7, (3, 3), strides=(2, 2))
skip = de_l2
de_l2 = LayerNormalization()(Activation("relu")(de_l2))
de_l2 = Concatenate()([de_l2, LayerNormalization()(Activation("relu")(en_l2))])
de_l2 = Dropout(dropout)(de_l2)
de_l2 = Conv2D(2**7, (1, 1), strides=(1, 1), padding="same")(de_l2)
de_l2 = Add()([de_l2, skip])
de_l3 = transpose_conv_block(de_l2, 2**7, (3, 3), strides=(2, 2))
skip = de_l3
de_l3 = LayerNormalization()(Activation("relu")(de_l3))
de_l3 = Concatenate()([de_l3, LayerNormalization()(Activation("relu")(en_l1))])
de_l3 = Dropout(dropout)(de_l3)
de_l3 = Conv2D(2**7, (1, 1), strides=(1, 1), padding="same")(de_l3)
de_l3 = Add()([de_l3, skip])
de_l4 = transpose_conv_block(de_l3, 2**7, (3, 3), strides=(2, 2))
de_l4 = LayerNormalization()(Activation("relu")(de_l4))
de_l4 = Dropout(dropout)(de_l4)
out = Conv2D(
out_class, (1, 1), strides=(1, 1), activation="softmax", padding="same", name="prediction"
)(de_l4)
return tf.keras.Model(inputs=input_score, outputs=out)
def my_conv_block(
inp: tf.Tensor,
kernels: int,
kernel_size: tuple[int, int] = (3, 3),
strides: tuple[int, int] = (1, 1),
) -> tf.Tensor:
inp = L.Conv2D(kernels, kernel_size, strides=strides, padding="same", dtype=tf.float32)(inp)
out = L.Activation("relu")(L.LayerNormalization()(inp))
out = L.SeparableConv2D(kernels, kernel_size, padding="same", dtype=tf.float32)(out)
out = L.Activation("relu")(L.LayerNormalization()(out))
out = L.Dropout(0.3)(out)
out = L.Add()([inp, out])
out = L.Activation("relu")(L.LayerNormalization()(out))
return cast(tf.Tensor, out)
def my_conv_small_block(
inp: tf.Tensor,
kernels: int,
kernel_size: tuple[int, int] = (3, 3),
strides: tuple[int, int] = (1, 1),
) -> tf.Tensor:
inp = L.Conv2D(kernels, kernel_size, strides=strides, padding="same", dtype=tf.float32)(inp)
out = L.Activation("relu")(L.LayerNormalization()(inp))
out = L.Dropout(0.3)(out)
out = L.Add()([inp, out])
out = L.Activation("relu")(L.LayerNormalization()(out))
return cast(tf.Tensor, out)
def my_trans_conv_block(
inp: tf.Tensor,
kernels: int,
kernel_size: tuple[int, int] = (3, 3),
strides: tuple[int, int] = (1, 1),
) -> tf.Tensor:
inp = L.Conv2DTranspose(
kernels, kernel_size, strides=strides, padding="same", dtype=tf.float32
)(inp)
out = L.Conv2D(kernels, kernel_size, padding="same", dtype=tf.float32)(inp)
out = L.Activation("relu")(L.LayerNormalization()(out))
out = L.Dropout(0.3)(out)
out = L.Add()([inp, out])
out = L.Activation("relu")(L.LayerNormalization()(out))
return out
def u_net(win_size: int = 288, out_class: int = 3) -> Model:
inp = L.Input(shape=(win_size, win_size, 3))
tensor = L.SeparableConv2D(128, (3, 3), activation="relu", padding="same")(inp)
l1 = my_conv_small_block(tensor, 64, (3, 3), strides=(2, 2))
l1 = my_conv_small_block(l1, 64, (3, 3))
l1 = my_conv_small_block(l1, 64, (3, 3))
skip = my_conv_small_block(l1, 128, (3, 3), strides=(2, 2))
l2 = my_conv_small_block(skip, 128, (3, 3))
l2 = my_conv_small_block(l2, 128, (3, 3))
l2 = my_conv_small_block(l2, 128, (3, 3))
l2 = my_conv_small_block(l2, 128, (3, 3))
l2 = L.Concatenate()([skip, l2])
l3 = my_conv_small_block(l2, 256, (3, 3))
l3 = my_conv_small_block(l3, 256, (3, 3))
l3 = my_conv_small_block(l3, 256, (3, 3))
l3 = my_conv_small_block(l3, 256, (3, 3))
l3 = my_conv_small_block(l3, 256, (3, 3))
l3 = L.Concatenate()([l2, l3])
bot = my_conv_small_block(l3, 256, (3, 3), strides=(2, 2))
st1 = L.SeparableConv2D(256, (3, 3), padding="same", dtype=tf.float32)(bot)
st1 = L.Activation("relu")(L.LayerNormalization()(st1))
st2 = L.SeparableConv2D(256, (3, 3), dilation_rate=(2, 2), padding="same", dtype=tf.float32)(
bot
)
st2 = L.Activation("relu")(L.LayerNormalization()(st2))
st3 = L.SeparableConv2D(256, (3, 3), dilation_rate=(6, 6), padding="same", dtype=tf.float32)(
bot
)
st3 = L.Activation("relu")(L.LayerNormalization()(st3))
st4 = L.SeparableConv2D(256, (3, 3), dilation_rate=(12, 12), padding="same", dtype=tf.float32)(
bot
)
st4 = L.Activation("relu")(L.LayerNormalization()(st4))
st = L.Concatenate()([st1, st2, st3, st4])
st = L.Conv2D(256, (1, 1), padding="same", dtype=tf.float32)(st)
norm = L.Activation("relu")(L.LayerNormalization()(st))
bot = my_trans_conv_block(norm, 256, (3, 3), strides=(2, 2))
tl3 = L.Conv2D(128, (3, 3), padding="same", dtype=tf.float32)(bot)
tl3 = L.Activation("relu")(L.LayerNormalization()(tl3))
tl3 = L.Concatenate()([tl3, l3])
tl3 = my_conv_small_block(tl3, 128, (3, 3))
tl3 = my_trans_conv_block(tl3, 128, (3, 3))
# Head 1
tl2 = L.Conv2D(128, (3, 3), padding="same", dtype=tf.float32)(tl3)
tl2 = L.Activation("relu")(L.LayerNormalization()(tl2))
tl2 = L.Concatenate()([tl2, l2])
tl2 = my_conv_small_block(tl2, 128, (3, 3))
tl2 = my_trans_conv_block(tl2, 128, (3, 3), strides=(2, 2))
tl1 = L.Conv2D(128, (3, 3), padding="same", dtype=tf.float32)(tl2)
tl1 = L.Activation("relu")(L.LayerNormalization()(tl1))
tl1 = L.Concatenate()([tl1, l1])
tl1 = my_conv_small_block(tl1, 128, (3, 3))
tl1 = my_trans_conv_block(tl1, 128, (3, 3), strides=(2, 2))
out1 = L.Conv2D(out_class, (1, 1), activation="softmax", padding="same", dtype=tf.float32)(tl1)
return tf.keras.Model(inputs=inp, outputs=out1)
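# Minimal sketch: instantiate the model and inspect its shapes. With the
# default win_size of 288 the network maps (288, 288, 3) windows to
# per-pixel softmax maps over out_class channels.
def _u_net_demo() -> None:
    model = u_net(win_size=288, out_class=3)
    print(model.input_shape)  # -> (None, 288, 288, 3)
    print(model.output_shape)  # -> (None, 288, 288, 3)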
| 10,695 | Python | .py | 236 | 39.525424 | 99 | 0.613235 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,895 | train.py | liebharc_homr/training/segmentation/train.py | import os
import random
from collections.abc import Callable, Generator, Iterator
from multiprocessing import Process, Queue
from typing import Any, cast
import augly.image as imaugs # type: ignore
import cv2
import numpy as np
import tensorflow as tf
from PIL import Image, ImageColor, ImageEnhance
from homr.simple_logging import eprint
from homr.type_definitions import NDArray
from training.segmentation.build_label import build_label, close_lines
from training.segmentation.constant_min import CHANNEL_NUM
from training.segmentation.types import Model
from training.segmentation.unet import semantic_segmentation, u_net
def monkey_patch_float_for_imaugs() -> None:
"""
    Monkey patch workaround of np.float for imaugs
"""
np.float = float # type: ignore # ruff: noqa: E731
def get_cvc_data_paths(dataset_path: str) -> list[list[str]]:
if not os.path.exists(dataset_path):
raise FileNotFoundError(f"{dataset_path} not found, download the dataset first.")
dirs = [
"curvature",
"ideal",
"interrupted",
"kanungo",
"rotated",
"staffline-thickness-variation-v1",
"staffline-thickness-variation-v2",
"staffline-y-variation-v1",
"staffline-y-variation-v2",
"thickness-ratio",
"typeset-emulation",
"whitespeckles",
]
data = []
for dd in dirs:
dir_path = os.path.join(dataset_path, dd)
folders = os.listdir(dir_path)
for folder in folders:
data_path = os.path.join(dir_path, folder)
imgs = os.listdir(os.path.join(data_path, "image"))
for img in imgs:
img_path = os.path.join(data_path, "image", img)
staffline = os.path.join(data_path, "gt", img)
symbol_path = os.path.join(data_path, "symbol", img)
data.append([img_path, staffline, symbol_path])
return data
def get_deep_score_data_paths(dataset_path: str) -> list[list[str]]:
if not os.path.exists(dataset_path):
raise FileNotFoundError(f"{dataset_path} not found, download the dataset first.")
imgs = os.listdir(os.path.join(dataset_path, "images"))
paths = []
for img in imgs:
image_path = os.path.join(dataset_path, "images", img)
seg_path = os.path.join(dataset_path, "segmentation", img.replace(".png", "_seg.png"))
paths.append([image_path, seg_path])
return paths
def apply_gradient_contrast(
image: Image.Image, start_contrast: float = 1.0, end_contrast: float = 0.7
) -> Image.Image:
width, height = image.size
gradient_array = np.linspace(start_contrast, end_contrast, num=width * height).reshape(
(height, width)
)
gradient = Image.fromarray((gradient_array * 255).astype(np.uint8), mode="L")
# Apply uniform contrast reduction
contrast_factor = end_contrast
enhancer = ImageEnhance.Contrast(image)
contrast_reduced_image = enhancer.enhance(contrast_factor)
# Blend the original image with the contrast-reduced image using the gradient mask
blended_image = Image.composite(image, contrast_reduced_image, gradient)
return blended_image
def preprocess_image(img_path: str, reduce_contrast: bool = False) -> Image.Image:
image = Image.open(img_path).convert("1")
if image.mode == "1":
# The input image contains only one channel.
arr = np.array(image)
out = np.zeros(arr.shape + (3,), dtype=np.uint8)
bg_is_white = np.count_nonzero(arr) > (arr.size * 0.7)
bg_idx = np.where(arr == bg_is_white)
# Change background color
hue = random.randint(19, 60)
sat = random.randint(0, 15)
val = random.randint(70, 100)
color = ImageColor.getrgb(f"hsv({hue}, {sat}%, {val}%)")
out[bg_idx[0], bg_idx[1]] = color
image = Image.fromarray(out)
aug_image = image
if reduce_contrast:
# Reduce contrast randomly
aug_image = apply_gradient_contrast(
aug_image, random.uniform(0.3, 1.0), random.uniform(0.3, 1.0)
)
# Color jitter
bright = (7 + random.randint(0, 6)) / 10 # 0.7~1.3
saturation = (5 + random.randint(0, 7)) / 10 # 0.5~1.2
contrast = (5 + random.randint(0, 10)) / 10 # 0.5~1.5
aug_image = imaugs.color_jitter(
aug_image, brightness_factor=bright, saturation_factor=saturation, contrast_factor=contrast
)
# Blur
rad = random.choice(np.arange(0.1, 2.1, 0.5))
aug_image = imaugs.blur(aug_image, radius=rad)
# Pixel shuffle, kind of adding noise
factor = random.choice(np.arange(0.1, 0.26, 0.05))
aug_image = imaugs.shuffle_pixels(aug_image, factor=factor)
# Image quality
qa = random.randint(0, 100)
aug_image = imaugs.encoding_quality(aug_image, quality=qa)
# Pixelize (pretty similar to blur?)
rat = random.randint(3, 10) / 10
aug_image = imaugs.pixelization(aug_image, ratio=rat)
return aug_image
def batch_transform(
img: Image.Image | NDArray, trans_func: Callable[[Image.Image], Image.Image]
) -> NDArray:
if isinstance(img, Image.Image):
return np.array(trans_func(img))
if not isinstance(img, np.ndarray):
raise ValueError("Input image should be either PIL.Image or np.ndarray")
color_channels = 3
if len(img.shape) != color_channels:
raise ValueError("Input image should be 3D array with shape (h, w, ch)")
ch_num = img.shape[2]
result = []
for i in range(ch_num):
tmp_img = Image.fromarray(img[..., i].astype(np.uint8))
tmp_img = trans_func(tmp_img)
result.append(np.array(tmp_img))
return np.dstack(result)
class MultiprocessingDataLoader:
def __init__(self, num_worker: int):
self._queue: Queue[list[Any]] = Queue(maxsize=20)
self._dist_queue: Queue[list[str]] = Queue(maxsize=30)
self._process_pool = []
for _ in range(num_worker):
processor = Process(target=self._preprocess_image)
processor.daemon = True
self._process_pool.append(processor)
self._pdist = Process(target=self._distribute_process)
self._pdist.daemon = True
def _start_processes(self) -> None:
if not self._pdist.is_alive():
self._pdist.start()
for process in self._process_pool:
if not process.is_alive():
process.start()
def _terminate_processes(self) -> None:
self._pdist.terminate()
for process in self._process_pool:
process.terminate()
def _distribute_process(self) -> None:
pass
def _preprocess_image(self) -> None:
pass
class DataLoader(MultiprocessingDataLoader):
def __init__(
self,
feature_files: list[list[str]],
win_size: int = 256,
num_samples: int = 100,
min_step_size: float = 0.2,
num_worker: int = 4,
):
super().__init__(num_worker)
self.feature_files = feature_files
random.shuffle(self.feature_files)
self.win_size = win_size
self.num_samples = num_samples
if isinstance(min_step_size, float):
min_step_size = max(min(abs(min_step_size), 1), 0.01)
self.min_step_size = round(win_size * min_step_size)
else:
self.min_step_size = max(min(abs(min_step_size), win_size), 2)
self.file_idx = 0
def _distribute_process(self) -> None:
while True:
paths = self.feature_files[self.file_idx]
self._dist_queue.put(paths)
self.file_idx += 1
if self.file_idx == len(self.feature_files):
random.shuffle(self.feature_files)
self.file_idx = 0
def _preprocess_image(self) -> None:
while True:
if not self._queue.full():
inp_img_path, staff_img_path, symbol_img_path = self._dist_queue.get()
# Preprocess image with transformations that won't change view.
image = preprocess_image(inp_img_path, reduce_contrast=True)
# Random resize
ratio = random.choice(np.arange(0.2, 1.21, 0.1))
tar_w = int(ratio * image.size[0])
tar_h = int(ratio * image.size[1])
image = imaugs.resize(image, width=tar_w, height=tar_h)
staff_img_array = cv2.imread(staff_img_path)
staff_img_array = cv2.cvtColor(staff_img_array, cv2.COLOR_BGR2GRAY).astype(np.uint8)
staff_img_array = close_lines(staff_img_array)
staff_img = Image.fromarray(staff_img_array)
                staff_img = imaugs.resize(staff_img, width=tar_w, height=tar_h)
symbol_img = imaugs.resize(symbol_img_path, width=tar_w, height=tar_h)
# Random perspective transform
seed = random.randint(0, 1000)
monkey_patch_float_for_imaugs()
random_rotation = random.uniform(-5, 5)
def perspect_trans(
img: Image.Image, seed: int = seed, random_rotation: float = random_rotation
) -> Any:
rotated = img.rotate(random_rotation)
return imaugs.perspective_transform(rotated, seed=seed, sigma=70)
image_trans = np.array(perspect_trans(image)) # RGB image
staff_img_trans = np.array(perspect_trans(staff_img)) # 1-bit mask
symbol_img_trans = np.array(perspect_trans(symbol_img)) # 1-bit mask
staff_img_trans = np.where(staff_img_trans, 1, 0)
symbol_img_trans = np.where(symbol_img_trans, 1, 0)
self._queue.put([image_trans, staff_img_trans, symbol_img_trans, ratio])
def __iter__(self) -> Iterator[tuple[NDArray, NDArray]]:
samples = 0
self._start_processes()
while samples < self.num_samples:
image, staff_img, symbol_img, ratio = self._queue.get()
start_x, start_y = 0, 0
max_y = image.shape[0] - self.win_size
max_x = image.shape[1] - self.win_size
while (start_x < max_x) and (start_y < max_y):
y_range = range(start_y, start_y + self.win_size)
x_range = range(start_x, start_x + self.win_size)
index = np.ix_(y_range, x_range)
# Can't use two 'range' inside the numpy array for indexing.
# Details refer to the following:
# https://stackoverflow.com/questions/30020143/indexing-slicing-a-2d-numpy-array-using-the-range-arange-function-as-the-argumen
feat = image[index]
staff = staff_img[index]
symbol = symbol_img[index]
neg = np.ones_like(staff) - staff - symbol
label = np.stack([neg, staff, symbol], axis=-1)
                yield feat, label
                samples += 1
y_step = random.randint(
round(self.min_step_size * ratio), round(self.win_size * ratio)
)
x_step = random.randint(
round(self.min_step_size * ratio), round(self.win_size * ratio)
)
start_y = min(start_y + y_step, max_y)
start_x = min(start_x + x_step, max_x)
self._terminate_processes()
def get_dataset(
self, batch_size: int
) -> tf.data.Dataset[Generator[tuple[NDArray, NDArray], None, None]]:
def gen_wrapper() -> Generator[tuple[NDArray, NDArray], None, None]:
yield from self
return (
tf.data.Dataset.from_generator(
gen_wrapper,
output_signature=(
tf.TensorSpec(
shape=(self.win_size, self.win_size, 3), dtype=tf.uint8, name=None
),
tf.TensorSpec(
shape=(self.win_size, self.win_size, 3), dtype=tf.float32, name=None
),
),
)
.batch(batch_size, drop_remainder=True)
.prefetch(tf.data.experimental.AUTOTUNE)
)
class DsDataLoader(MultiprocessingDataLoader):
def __init__(
self,
feature_files: list[list[str]],
win_size: int = 256,
num_samples: int = 100,
step_size: float = 0.5,
num_worker: int = 4,
):
super().__init__(num_worker)
self.feature_files = feature_files
random.shuffle(self.feature_files)
self.win_size = win_size
self.num_samples = num_samples
if isinstance(step_size, float):
step_size = max(abs(step_size), 0.01)
self.step_size = round(win_size * step_size)
else:
self.step_size = max(abs(step_size), 2)
self.file_idx = 0
def _distribute_process(self) -> None:
while True:
paths = self.feature_files[self.file_idx]
self._dist_queue.put(paths)
self.file_idx += 1
if self.file_idx == len(self.feature_files):
random.shuffle(self.feature_files)
self.file_idx = 0
def _preprocess_image(self) -> None:
while True:
if not self._queue.full():
inp_img_path, seg_img_path = self._dist_queue.get()
# Preprocess image with transformations that won't change view.
image = preprocess_image(inp_img_path)
strengthen_channels = {
1: (5, 5),
}
label = build_label(seg_img_path, strenghten_channels=strengthen_channels)
# Random resize
ratio = random.choice(np.arange(0.2, 1.21, 0.1))
tar_w = int(ratio * image.size[0])
tar_h = int(ratio * image.size[1])
def trans_func(img: Image.Image, width: int = tar_w, height: int = tar_h) -> Any:
return imaugs.resize(img, width=width, height=height)
image_trans = batch_transform(image, trans_func)
label_trans = batch_transform(label, trans_func)
# Random perspective transform
seed = random.randint(0, 1000)
monkey_patch_float_for_imaugs()
random_rotation = random.uniform(-5, 5)
def perspect_trans(
img: Image.Image, seed: int = seed, random_rotation: float = random_rotation
) -> Any:
rotated = img.rotate(random_rotation)
return imaugs.perspective_transform(rotated, seed=seed, sigma=70)
image_arr = np.array(batch_transform(image_trans, perspect_trans)) # RGB image
label_arr = np.array(batch_transform(label_trans, perspect_trans))
self._queue.put([image_arr, label_arr, ratio])
def __iter__(self) -> Iterator[tuple[NDArray, NDArray]]:
samples = 0
self._start_processes()
while samples < self.num_samples:
image, label, ratio = self._queue.get()
# Discard bottom spaces that has no contents.
staff = label[..., 1]
yidx, _ = np.where(staff > 0)
if len(yidx) > 0:
max_y = min(np.max(yidx) + 100, image.shape[0])
else:
max_y = image.shape[0]
max_y = max_y - self.win_size
max_x = image.shape[1] - self.win_size
grid_x = range(0, max_x, round(self.step_size * ratio))
grid_y = range(0, max_y, round(self.step_size * ratio))
meshgrid = np.meshgrid(grid_x, grid_y, indexing="ij")
coords = np.dstack(meshgrid).reshape(-1, 2)
np.random.shuffle(coords)
for start_x, start_y in coords:
y_range = range(start_y, start_y + self.win_size)
x_range = range(start_x, start_x + self.win_size)
index = np.ix_(y_range, x_range)
# Can't use two 'range' inside the numpy array for indexing.
# Details refer to the following:
# https://stackoverflow.com/questions/30020143/indexing-slicing-a-2d-numpy-array-using-the-range-arange-function-as-the-argumen
feat = image[index]
ll = label[index]
                yield feat, ll
                samples += 1
self._terminate_processes()
def get_dataset(
self, batch_size: int
) -> tf.data.Dataset[Generator[tuple[NDArray, NDArray], None, None]]:
def gen_wrapper() -> Generator[tuple[NDArray, NDArray], None, None]:
yield from self
return (
tf.data.Dataset.from_generator(
gen_wrapper,
output_signature=(
tf.TensorSpec(
shape=(self.win_size, self.win_size, 3), dtype=tf.uint8, name=None
),
tf.TensorSpec(
shape=(self.win_size, self.win_size, CHANNEL_NUM),
dtype=tf.float32,
name=None,
),
),
)
.batch(batch_size, drop_remainder=True)
.prefetch(tf.data.experimental.AUTOTUNE)
)
class WarmUpLearningRate(tf.keras.optimizers.schedules.LearningRateSchedule):
def __init__(
self,
init_lr: float = 0.1,
warm_up_steps: int = 1000,
decay_step: int = 3000,
decay_rate: float = 0.25,
min_lr: float = 1e-8,
):
self.init_lr = init_lr
self.warm_up_steps = warm_up_steps
self.decay_step = decay_step
self.decay_rate = decay_rate
self.min_lr = min_lr
self.warm_step_size = (init_lr - min_lr) / warm_up_steps
def __call__(self, step: int | tf.Tensor) -> float | tf.Tensor:
step = tf.cast(step, tf.float32)
warm_lr = self.min_lr + self.warm_step_size * step
offset = step - self.warm_up_steps
cycle = offset // self.decay_step
start_lr = self.init_lr * tf.pow(self.decay_rate, cycle)
end_lr = start_lr * self.decay_rate
step_size = (start_lr - end_lr) / self.decay_step
lr = start_lr - (offset - cycle * self.decay_step) * step_size
true_lr = tf.where(offset > 0, lr, warm_lr)
result = tf.maximum(true_lr, self.min_lr)
return cast(float | tf.Tensor, result)
def get_config(self) -> dict[str, Any]:
return {
"warm_up_steps": self.warm_up_steps,
"decay_step": self.decay_step,
"decay_rate": self.decay_rate,
"min_lr": self.min_lr,
}
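# A short sketch of the schedule: the rate ramps linearly from min_lr to
# init_lr during warm-up, then decays linearly towards init_lr * decay_rate
# within each decay cycle.
def _warmup_lr_demo() -> None:
    schedule = WarmUpLearningRate(init_lr=0.1, warm_up_steps=10, decay_step=20)
    for step in [0, 5, 10, 20, 30]:
        print(step, float(schedule(step)))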
def train_model(
dataset_path: str,
train_val_split: float = 0.1,
learning_rate: float = 5e-4,
epochs: int = 15,
steps: int = 1000,
batch_size: int = 8,
val_steps: int = 200,
val_batch_size: int = 8,
early_stop: int = 8,
data_model: str = "segnet",
) -> Model:
if data_model == "segnet":
feat_files = get_deep_score_data_paths(dataset_path)
else:
feat_files = get_cvc_data_paths(dataset_path)
random.shuffle(feat_files)
split_idx = round(train_val_split * len(feat_files))
train_files = feat_files[split_idx:]
val_files = feat_files[:split_idx]
eprint(f"Loading dataset. Train/validation: {len(train_files)}/{len(val_files)}")
if data_model == "segnet":
win_size = 288
train_data = DsDataLoader(
train_files, win_size=win_size, num_samples=epochs * steps * batch_size
).get_dataset(batch_size)
val_data = DsDataLoader(
val_files, win_size=win_size, num_samples=epochs * val_steps * val_batch_size
).get_dataset(val_batch_size)
model = u_net(win_size=win_size, out_class=CHANNEL_NUM)
else:
win_size = 256
train_data = DataLoader(
train_files, win_size=win_size, num_samples=epochs * steps * batch_size
).get_dataset(batch_size)
val_data = DataLoader(
val_files, win_size=win_size, num_samples=epochs * val_steps * val_batch_size
).get_dataset(val_batch_size)
model = semantic_segmentation(win_size=256, out_class=3)
eprint("Initializing model")
optim = tf.keras.optimizers.Adam(learning_rate=WarmUpLearningRate(learning_rate))
loss = tf.keras.losses.CategoricalFocalCrossentropy()
model.compile(optimizer=optim, loss=loss, metrics=["accuracy"])
callbacks = [
tf.keras.callbacks.EarlyStopping(patience=early_stop, monitor="val_accuracy"),
tf.keras.callbacks.ModelCheckpoint(
"seg_unet.keras", save_weights_only=False, monitor="val_accuracy"
),
]
eprint("Start training")
try:
model.fit(
train_data,
validation_data=val_data,
epochs=epochs,
steps_per_epoch=steps,
validation_steps=val_steps,
callbacks=callbacks,
)
return model
except Exception as e:
eprint(e)
return model
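if __name__ == "__main__":
    # Illustrative entry point, assuming the ds2_dense dataset has been
    # extracted to this (hypothetical) location.
    trained_model = train_model("datasets/ds2_dense", data_model="segnet")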
| 21,226 | Python | .py | 477 | 33.926625 | 143 | 0.586347 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,896 | dense_dataset_definitions.py | liebharc_homr/training/segmentation/dense_dataset_definitions.py | class Symbols:
BACKGROUND = [0]
LEDGERLINE = [2]
BARLINE_BETWEEN = [3]
BARLINE_END = [4]
ALL_BARLINES = BARLINE_BETWEEN + BARLINE_END
REPEAT_DOTS = [7]
    G_CLEF = [10]
    C_CLEF = [11, 12]
    F_CLEF = [13]
    ALL_CLEFS = G_CLEF + C_CLEF + F_CLEF
NUMBERS = [19, 20]
TIME_SIGNATURE_SUBSET = [21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 33, 34]
TIME_SIGNATURE = TIME_SIGNATURE_SUBSET + [31, 32] # Oemer hasn't used these in the past
NOTEHEAD_FULL_ON_LINE = [35]
UNKNOWN = [
36,
38,
40,
128,
143,
144,
148,
150,
157,
159,
160,
161,
162,
163,
164,
167,
170,
171,
]
NOTEHEAD_FULL_BETWEEN_LINES = [37]
NOTEHEAD_HOLLOW_ON_LINE = [39]
NOTEHEAD_HOLLOW_BETWEEN_LINE = [41]
WHOLE_NOTE_ON_LINE = [43]
WHOLE_NOTE_BETWEEN_LINE = [45]
DOUBLE_WHOLE_NOTE_ON_LINE = [47]
DOUBLE_WHOLE_NOTE_BETWEEN_LINE = [49]
NOTEHEADS_SOLID = NOTEHEAD_FULL_ON_LINE + NOTEHEAD_FULL_BETWEEN_LINES
NOTEHEADS_HOLLOW = NOTEHEAD_HOLLOW_ON_LINE + NOTEHEAD_HOLLOW_BETWEEN_LINE
NOTEHEADS_WHOLE = (
WHOLE_NOTE_ON_LINE
+ WHOLE_NOTE_BETWEEN_LINE
+ DOUBLE_WHOLE_NOTE_ON_LINE
+ DOUBLE_WHOLE_NOTE_BETWEEN_LINE
)
NOTEHEADS_ALL = (
NOTEHEAD_FULL_ON_LINE
+ NOTEHEAD_FULL_BETWEEN_LINES
+ NOTEHEAD_HOLLOW_ON_LINE
+ NOTEHEAD_HOLLOW_BETWEEN_LINE
+ WHOLE_NOTE_ON_LINE
+ WHOLE_NOTE_BETWEEN_LINE
+ DOUBLE_WHOLE_NOTE_ON_LINE
+ DOUBLE_WHOLE_NOTE_BETWEEN_LINE
)
DOT = [51]
STEM = [52]
TREMOLO = [53, 54, 55, 56]
FLAG_DOWN = [58, 60, 61, 62, 63]
FLAG_UP = [64, 66, 67, 68, 69]
FLAT = [70]
NATURAL = [72]
SHARP = [74]
DOUBLE_SHARP = [76]
ALL_ACCIDENTALS = FLAT + NATURAL + SHARP + DOUBLE_SHARP
KEY_FLAT = [78]
KEY_NATURAL = [79]
KEY_SHARP = [80]
ALL_KEYS = KEY_FLAT + KEY_NATURAL + KEY_SHARP
ACCENT_ABOVE = [81]
ACCENT_BELOW = [82]
STACCATO_ABOVE = [83]
STACCATO_BELOW = [84]
TENUTO_ABOVE = [85]
TENUTO_BELOW = [86]
STACCATISSIMO_ABOVE = [87]
STACCATISSIMO_BELOW = [88]
MARCATO_ABOVE = [89]
MARCATO_BELOW = [90]
FERMATA_ABOVE = [91]
FERMATA_BELOW = [92]
BREATH_MARK = [93]
REST_LARGE = [95]
REST_LONG = [96]
REST_BREVE = [97]
REST_FULL = [98]
REST_QUARTER = [99]
REST_EIGHTH = [100]
REST_SIXTEENTH = [101]
REST_THIRTY_SECOND = [102]
REST_SIXTY_FOURTH = [103]
REST_ONE_HUNDRED_TWENTY_EIGHTH = [104]
ALL_RESTS_EXCEPT_LARGE = (
REST_LONG
+ REST_BREVE
+ REST_FULL
+ REST_QUARTER
+ REST_EIGHTH
+ REST_SIXTEENTH
+ REST_THIRTY_SECOND
+ REST_SIXTY_FOURTH
+ REST_ONE_HUNDRED_TWENTY_EIGHTH
)
ALL_RESTS = ALL_RESTS_EXCEPT_LARGE
TRILL = [127]
GRUPPETO = [129]
MORDENT = [130]
DOWN_BOW = [131]
UP_BOW = [132]
SYMBOL = [133, 134, 135, 138, 139, 141, 142]
TUPETS = [136, 137, 149, 151, 152, 153, 154, 155, 156]
SLUR_AND_TIE = [145, 147]
BEAM = [146]
STAFF = [165]
DENSE_DATASET_DEFINITIONS = Symbols()
| 3,238 | Python | .py | 120 | 20.733333 | 92 | 0.569641 | liebharc/homr | 8 | 3 | 2 | AGPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,897 | People-sDailyEpubCreator.py | ARRray0-o_People-sDailyEpubCreator/People-sDailyEpubCreator.py | from lxml import html
from datetime import datetime, timedelta
from ebooklib import epub
import requests
import webbrowser
def fetch_articles(custom_date=None):
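    # Scrape every section and article of the People's Daily e-paper for the
    # given date (format 'YYYY-MM/DD', defaults to today); returns the list
    # of (section, title, content, filename) tuples and the date used.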
articles_data = []
today = custom_date if custom_date else datetime.now().strftime('%Y-%m/%d')
base_url = f'http://paper.people.com.cn/rmrb/html/{today}/'
section_counter = 0
unique_articles = set()
try:
response = requests.get(base_url + 'nbs.D110000renmrb_01.htm')
response.raise_for_status()
except requests.HTTPError:
print('页面未找到,请确认目标日期的《人民日报》(电子版)是否已发行,或检查系统日期。')
return articles_data, today
except requests.RequestException as e:
print(f'网络请求出错: {e}')
return articles_data, today
doc = html.fromstring(response.content)
sections = doc.xpath('/html/body/div[2]/div[2]/div[2]/div/div/a')
for section in sections:
section_counter += 1
article_counter = 0
section_name = section.text_content().split(':')[-1]
section_url = base_url + section.get('href').lstrip('./')
try:
response = requests.get(section_url)
response.raise_for_status()
except requests.RequestException as e:
print(f'获取文章链接时出错: {e}')
continue
doc = html.fromstring(response.content)
articles = doc.xpath('/html/body/div[2]/div[2]/div[3]/ul/li/a')
for article in articles:
article_counter += 1
article_title = article.text_content().strip()
article_url = base_url + article.get('href')
try:
response = requests.get(article_url)
response.raise_for_status()
except requests.RequestException as e:
print(f'获取文章内容时出错: {e}')
continue
doc = html.fromstring(response.content)
article_paragraphs = doc.xpath('//div[@id="ozoom"]/p')
article_content = ''.join([f'<p>{html.tostring(p, encoding=str, method="html", with_tail=False).strip()}</p>' for p in article_paragraphs])
article_signature = (section_name, article_title, article_content)
if article_signature in unique_articles:
continue
unique_articles.add(article_signature)
filename = f'{section_counter}_{article_counter}.xhtml'
articles_data.append((section_name, article_title, article_content, filename))
return articles_data, today
def parse_date_input(user_input):
current_year = datetime.now().year
try:
if user_input == "":
return datetime.now().strftime('%Y-%m/%d'), False
if user_input.startswith("-") and user_input[1:].isdigit():
days_ago = int(user_input[1:])
target_date = datetime.now() - timedelta(days=days_ago)
return target_date.strftime('%Y-%m/%d'), True
parts = user_input.split(" ")
if len(parts) == 3 and all(part.isdigit() for part in parts):
year = int(parts[0]) if len(parts[0]) == 4 else int("20" + parts[0])
month = int(parts[1])
day = int(parts[2])
elif len(parts) == 2 and all(part.isdigit() for part in parts):
year = current_year
month = int(parts[0])
day = int(parts[1])
elif len(parts) == 1 and parts[0].isdigit():
input_weekday = int(parts[0])
if input_weekday < 1 or input_weekday > 7:
raise ValueError("星期数必须在1到7之间。")
weekday = (input_weekday - 1) % 7
today = datetime.now()
today_weekday = today.weekday()
day_diff = (today_weekday - weekday) % 7
target_date = today - timedelta(days=day_diff) if day_diff != 0 else today
return target_date.strftime('%Y-%m/%d'), True
else:
raise ValueError("输入格式错误,请按照规定格式输入日期。")
return datetime(year, month, day).strftime('%Y-%m/%d'), True
except ValueError as e:
return None, False
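# A few illustrative inputs for parse_date_input (results depend on the
# current date for the relative forms):
#   ""         -> today's paper, no confirmation needed
#   "-1"       -> yesterday
#   "2024 5 1" -> '2024-05/01'
#   "5 1"      -> May 1st of the current year
#   "3"        -> the most recent Wednesday (weekday 3)
def _parse_date_demo() -> None:
    for text in ["", "-1", "2024 5 1", "5 1", "3"]:
        print(repr(text), "->", parse_date_input(text))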
def create_epub(articles_data, today):
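    # Assemble the scraped articles into an EPUB: one chapter per section,
    # one sub-page per article, written to 人民日报_<date>.epub.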
book = epub.EpubBook()
book.set_title(f'人民日报_{today.replace("/", "-")}')
sections = {}
spine = ['nav']
toc = []
for section_name, article_title, content, filename in articles_data:
if section_name not in sections:
sections[section_name] = {
'section': epub.EpubHtml(title=section_name, file_name=f'{section_name}.xhtml', lang='zh', content=f'<h1>{section_name}</h1>'),
'articles': []
}
book.add_item(sections[section_name]['section'])
article_id = f'article_{filename[:-6]}'
sub_section = epub.EpubHtml(title=article_title, file_name=filename, content=f'<h2>{article_title}</h2>{content}', lang='zh')
sections[section_name]['articles'].append(sub_section)
book.add_item(sub_section)
for section_info in sections.values():
spine.append(section_info['section'])
toc.append((section_info['section'], section_info['articles']))
for article in section_info['articles']:
spine.append(article)
book.spine = spine
book.toc = toc
book.add_item(epub.EpubNcx())
book.add_item(epub.EpubNav())
book.add_item(epub.EpubItem(uid="style_nav", file_name="style/nav.css", media_type="text/css", content='BODY {color: black;}'))
epub_filename = f'人民日报_{today.replace("/", "-")}.epub'
epub.write_epub(epub_filename, book, {})
def format_date_chinese(date):
weekdays = ["星期一", "星期二", "星期三", "星期四", "星期五", "星期六", "星期日"]
year = date.year
month = date.month
day = date.day
weekday = weekdays[date.weekday()]
return f"{year}年{month}月{day}日{weekday}"
if __name__ == '__main__':
guide_url = "https://flowus.cn/share/c70a84fe-a3ba-450d-ba13-7e4ee855545b"
help_url = "https://flowus.cn/pdec/2a91874c-fec4-43a2-8b1d-cbfc27720db1"
first_run = True
while True:
if first_run:
prompt_message = "本工具作为一个开源项目在GPL-3.0许可下发布,输入g后回车打开说明网页。\n请输入需要获取的报纸所发行的日期:"
first_run = False
else:
prompt_message = "\n请输入需要获取的报纸所发行的日期:"
user_input = input(prompt_message).lower()
        if user_input in ['guide', 'g']:
webbrowser.open(guide_url)
print("正在打开说明网页...")
continue
if user_input in ['help', 'h']:
webbrowser.open(help_url)
print("正在打开使用帮助...")
continue
target_date, need_confirmation = parse_date_input(user_input)
while target_date is None:
print("无法识别输入内容,请重新输入。输入help后回车打开使用帮助。")
user_input = input("\n请输入需要获取的报纸所发行的日期:").lower()
if user_input in ['guide', 'g']:
webbrowser.open(guide_url)
print("正在打开说明网页...")
break
elif user_input in ['help', 'h']:
webbrowser.open(help_url)
print("正在打开使用帮助...")
break
target_date, need_confirmation = parse_date_input(user_input)
else:
if not need_confirmation or input(f"即将自动获取{format_date_chinese(datetime.strptime(target_date, '%Y-%m/%d'))}所发行的《人民日报》(电子版),按回车确认。") == '':
if datetime.strptime(target_date, '%Y-%m/%d') < datetime(2021, 1, 1):
print("本程序所有数据来自http://paper.people.com.cn/ ,此网站提供了2021年1月1日及以后发行的《人民日报》(电子版),更早的报纸暂未开放获取。")
continue
articles_data, today = fetch_articles(target_date)
if articles_data:
create_epub(articles_data, today)
print(f"已成功获取《人民日报》(电子版 {format_date_chinese(datetime.strptime(target_date, '%Y-%m/%d'))})。您可以继续输入日期,或手动关闭窗口。")
continue
else:
if datetime.now().hour < 6 and user_input == "":
yesterday = (datetime.now() - timedelta(days=1)).strftime('%Y-%m/%d')
confirm_input = input(f"今天的《人民日报》(电子版)可能还没有发行,即将获取{format_date_chinese(datetime.strptime(yesterday, '%Y-%m/%d'))}的《人民日报》(电子版),按回车确认。")
if confirm_input in ['back', 'b']:
continue
articles_data, actual_date = fetch_articles(yesterday)
if articles_data:
create_epub(articles_data, actual_date)
print(f"《人民日报》{format_date_chinese(datetime.strptime(actual_date, '%Y-%m/%d'))}的电子版已经生成。")
else:
print("无法获取昨天的文章数据。")
continue | 9,793 | Python | .py | 182 | 36.456044 | 159 | 0.569245 | ARRray0-o/People-sDailyEpubCreator | 8 | 5 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,898 | config.py | lauratheq_lazywp/src/config.py | #!/usr/bin/python3
'''
Sets the log level. Possible levels:
NOTSET
DEBUG
INFO
WARN
ERROR
CRITICAL
'''
LOG_LEVEL = 'DEBUG'
| 155 | Python | .py | 11 | 10.636364 | 36 | 0.617021 | lauratheq/lazywp | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |
2,289,899 | logging.py | lauratheq_lazywp/src/logging.py | #!/usr/bin/python3
# TODO comments
import logging
class Logging:
logger = None
    log_level = 'NOTSET'
log_levels = {
"NOTSET": 0,
"DEBUG": 10,
"INFO": 20,
"WARN": 30,
"ERROR": 40,
"CRITICAL": 50
}
def __init__(self, **kwargs):
'''
Initializes the class and sets all needed variables
Parameters:
kwargs['log_level'] (str): the setted log level
Returns:
void
'''
if 'log_level' in kwargs:
self.log_level = kwargs['log_level']
# initialize logger
self.init_logger()
def init_logger(self):
'''
initializes the logger and sets the log level
Parameters:
self (obj): the class object
Returns:
void
'''
self.logger = logging.getLogger('lazywp')
self.logger.setLevel(self.log_levels[self.log_level])
        log_format = '[%(asctime)s] %(filename)s - %(message)s'
        formatter = logging.Formatter(log_format, "%Y-%m-%d %H:%M:%S")
# file logging
fh = logging.FileHandler('/var/log/lazywp.log')
fh.setFormatter(formatter)
fh.setLevel(self.log_levels[self.log_level])
self.logger.addHandler(fh)
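if __name__ == "__main__":
    # Minimal usage sketch; /var/log/lazywp.log must be writable.
    wrapper = Logging(log_level="DEBUG")
    wrapper.logger.debug("logger initialized")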
| 1,295 | Python | .py | 43 | 21.209302 | 66 | 0.554464 | lauratheq/lazywp | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:43 PM (Europe/Amsterdam) |