WE CAN HEAR THE AUDIO!!!! but uhh rendering to a file is cooked lmao
src/main.py | 29
@@ -1,18 +1,33 @@
print("=== TerminalDAW - Version 0.0.1 ===\n")

from ui.app import AppUI
from project import Project, ProjectChannel, AudioChannelChunk
import librosa
import sounddevice
import mp3


if __name__ == "__main__":
    print("Loading project...")
    """test_project = Project(channels=[
        ProjectChannel(chunks=[
            AudioChannelChunk(*librosa.load("120 bpm amen break.mp3", mono=False), position=0, name="120 bpm amen break.mp3"),
        ], name="drums"),
    ])
    #test_project.write_to_file("test_project.tdp")"""
    test_project = Project.from_file("test_project.tdp")
    """test_project = Project(song_length=2)

    drum_channel = ProjectChannel(
        test_project,
        name="Drums",
        volume=5,

    )
    drum_channel.chunks.append(AudioChannelChunk(
        drum_channel,
        position=0,
        *librosa.load("120 bpm amen break.mp3", mono=False, sr=test_project.sample_rate),
        name="120 bpm amen break.mp3"
    ))

    test_project.channels.append(drum_channel)

    test_project.write_to_file("test_project.tdp")"""
    test_project = Project.from_file("test_project.tdp")

    # start the ui
    print("Starting UI...")
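For context on the constructor calls above: librosa.load returns an (audio, sample_rate) tuple, which is why it can be star-unpacked straight into AudioChannelChunk's audio_data and sample_rate parameters. A quick sketch (file name and rate are illustrative):

    import librosa

    # With mono=False the audio keeps its channels and comes back shaped
    # (channels, samples); mono files still come back 1-D, which is what
    # the ndim check in AudioChannelChunk.render handles. sr=... resamples
    # to the given rate on load.
    audio, sr = librosa.load("120 bpm amen break.mp3", mono=False, sr=44100)
    print(audio.shape, sr)  # e.g. (2, 441000) 44100 for a ~10 s stereo file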
src/output.mp3 | 0 (new binary file, not shown)
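The 0-byte output.mp3 matches the commit message: rendering to a file isn't working yet. Whatever the imported mp3 module was meant to do, pedalboard (already imported in project.py) can also write MP3s; this is a hedged sketch of a fallback export helper, not the project's actual approach:

    from pedalboard.io import AudioFile

    # Hypothetical helper: Project.render() yields (samples, 2) float32,
    # while AudioFile.write expects (channels, samples), hence the .T.
    def export_mp3(project, path="output.mp3"):
        buffer = project.render()
        with AudioFile(path, "w", samplerate=project.sample_rate, num_channels=2) as f:
            f.write(buffer.T)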
src/project.py | 109
@@ -2,6 +2,7 @@ import msgpack
import enum
import numpy as np
import msgpack_numpy
import pedalboard
from dataclasses import dataclass, asdict
@@ -18,13 +19,18 @@ class ChunkType(enum.Enum):
    MIDI = enum.auto()

class ChannelChunk:
    def __init__(self, position: float = 0.0, name: str = "Chunk", chunk_type: ChunkType = ChunkType.CHUNK):
    def __init__(self, channel, position: float = 0.0, name: str = "Chunk", chunk_type: ChunkType = ChunkType.CHUNK):
        self.channel = channel
        self.position = position  # position is how many bars into the song the chunk is
        self.name = name
        self.chunk_type = chunk_type

    def from_json(json: dict) -> ChannelChunk:
    def render(self):
        pass

    def from_json(json: dict, channel) -> ChannelChunk:
        return ChannelChunk(
            channel,
            chunk_type = ChunkType(json["type"]),
            name = json["name"],
            position = json["position"]
@@ -38,13 +44,27 @@ class ChannelChunk:
        }

class AudioChannelChunk(ChannelChunk):
    def __init__(self, audio_data: np.ndarray, sample_rate: int, position: float = 0.0, name: str = "Sample"):
        super().__init__(position, name, chunk_type=ChunkType.AUDIO)
    def __init__(self, channel, audio_data: np.ndarray, sample_rate: int, position: float = 0.0, name: str = "Sample"):
        super().__init__(channel, position, name, chunk_type=ChunkType.AUDIO)
        self.audio_data = audio_data
        self.sample_rate = sample_rate

    def from_json(json: dict) -> ChannelChunk:
    def render(self):
        start_sample = int(self.position * self.channel.project.samples_per_bar)

        audio = self.audio_data.T

        # ensure stereo
        if audio.ndim == 1:
            audio = np.stack([audio, audio], axis=1)

        end_sample = start_sample + len(audio)

        return start_sample, end_sample, audio

    def from_json(json: dict, channel) -> ChannelChunk:
        return AudioChannelChunk(
            channel,
            name = json["name"],
            position = json["position"],
            audio_data = json["audio_data"],
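A note on the transpose and the ndim check in render above: librosa hands back (channels, samples) while the mix buffer is (samples, channels), and mono loads come back 1-D, so the same signal is duplicated into both ears. Minimal illustration:

    import numpy as np

    mono = np.zeros(88200, dtype=np.float32)   # 1-D mono signal
    stereo = np.stack([mono, mono], axis=1)    # same signal in both channels
    print(stereo.shape)                        # (88200, 2), ready to add into the mix buffer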
@@ -66,7 +86,8 @@ chunk_type_associations = {
}

class ProjectChannel:
    def __init__(self, name: str = "", volume: int = 0, pan: int = 0, mute: bool = False, solo: bool = False, chunks: list[ChannelChunk] = []):
    def __init__(self, project, name: str = "", volume: int = 0, pan: int = 0, mute: bool = False, solo: bool = False, chunks: list[ChannelChunk] = []):
        self.project = project
        self.name = name
        self.volume = volume
        self.pan = pan
@@ -74,16 +95,57 @@ class ProjectChannel:
        self.solo = solo
        self.chunks = chunks

    def from_json(json: dict) -> ProjectChannel:
        return ProjectChannel(
        self.board = pedalboard.Pedalboard([
            pedalboard.Reverb()
        ])

    def pan_stereo(self, stereo, pan):
        pan = np.clip(pan, -1.0, 1.0)

        left_gain = np.cos((pan + 1) * np.pi / 4)
        right_gain = np.sin((pan + 1) * np.pi / 4)

        out = stereo.copy()
        out[:, 0] *= left_gain
        out[:, 1] *= right_gain

        return out
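pan_stereo implements an equal-power pan law: the two gains trace a quarter circle, so left² + right² = 1 at every position and perceived loudness stays roughly constant across the sweep. Spot-checking the endpoints and center:

    import numpy as np

    for pan in (-1.0, 0.0, 1.0):
        left = np.cos((pan + 1) * np.pi / 4)
        right = np.sin((pan + 1) * np.pi / 4)
        print(f"pan={pan:+.0f}  L={left:.3f}  R={right:.3f}")
    # pan=-1  L=1.000  R=0.000   (hard left)
    # pan=+0  L=0.707  R=0.707   (center, -3 dB per side)
    # pan=+1  L=0.000  R=1.000   (hard right)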
    def render(self):
        buffer = np.zeros((self.project.total_song_samples, 2), dtype=np.float32)

        # render each chunk
        for chunk in self.chunks:
            start, end, audio = chunk.render()
            buffer[start:end] += audio

        # apply effects
        buffer = self.board(buffer, self.project.sample_rate)

        # apply volume
        gain = 10 ** (self.volume / 20)
        buffer *= gain

        # pan
        self.pan_stereo(buffer, self.pan/100)

        return buffer
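The volume field is treated as decibels and converted to a linear gain with 10 ** (dB / 20); worked values below. Also worth noting: pan_stereo copies its input and returns the panned array, so as written the render above computes the pan but drops the result — reassigning buffer = self.pan_stereo(buffer, self.pan / 100) would actually apply it.

    # dB -> linear amplitude: gain = 10 ** (dB / 20)
    for db in (-6, 0, 5):
        print(db, round(10 ** (db / 20), 3))
    # -6 -> 0.501  (about half amplitude)
    #  0 -> 1.0    (unity)
    #  5 -> 1.778  (the test project's drum channel)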
    def from_json(json: dict, project) -> ProjectChannel:
        channel = ProjectChannel(
            project,
            name = json["name"],
            volume = json["volume"],
            pan = json["pan"],
            mute = json["mute"],
            solo = json["solo"],
            chunks = [chunk_type_associations[ChunkType(chunk["type"])].from_json(chunk) for chunk in json["chunks"]]
        )

        channel.chunks = [chunk_type_associations[ChunkType(chunk["type"])].from_json(chunk, channel) for chunk in json["chunks"]]

        return channel

    def to_json(self):
        return {
            "name": self.name,
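from_json now builds the channel first and attaches the chunks in a second pass, so each chunk can hold a reference to its parent channel. The chunk_type_associations table itself is elided by the diff; judging from how it is indexed, it presumably maps serialized type tags to chunk classes, roughly:

    # Presumed shape of the (elided) dispatch table -- an assumption,
    # not shown in this diff:
    chunk_type_associations = {
        ChunkType.CHUNK: ChannelChunk,
        ChunkType.AUDIO: AudioChannelChunk,
    }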
@@ -95,30 +157,51 @@ class ProjectChannel:
        }

class Project:
    def __init__(self, channels: list[ProjectChannel], version: float = 1.0, bpm: float = 120, time_signature: TimeSignature = TimeSignature(4, 4)):
    def __init__(self, channels: list[ProjectChannel] = [], version: float = 1.0, bpm: float = 120, time_signature: TimeSignature = TimeSignature(4, 4), song_length: float = 16, sample_rate: int = 44100):
        self.version = version
        self.bpm = bpm
        self.sample_rate = sample_rate
        self.time_signature = time_signature
        self.channels = channels
        self.song_length = song_length  # length of the song in bars

        self.seconds_per_bar = (60.0 / self.bpm) * self.time_signature.beats_per_measure
        self.samples_per_bar = int(self.seconds_per_bar * self.sample_rate)
        self.total_song_samples = int(self.samples_per_bar * self.song_length)
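Worked through at the defaults (120 BPM, 4/4, 44100 Hz, 16 bars), the tempo bookkeeping comes out to:

    seconds_per_bar = (60.0 / 120) * 4      # 2.0 seconds per bar
    samples_per_bar = int(2.0 * 44100)      # 88200 samples per bar
    total_song_samples = 88200 * 16         # 1411200 samples, ~32 s of audio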
    def render(self):
        buffer = np.zeros((self.total_song_samples, 2), dtype=np.float32)

        for channel in self.channels:
            buffer += channel.render()

        return buffer
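One consequence of straight summation: with several channels (or a +5 dB gain like the test project's drum channel) the master can exceed [-1.0, 1.0] and clip on playback or export. A blunt safeguard, sketched here as an assumption rather than anything the project does yet:

    import numpy as np

    def safe_master(buffer: np.ndarray) -> np.ndarray:
        # Normalize only if the mix actually exceeds full scale.
        peak = np.max(np.abs(buffer))
        return buffer / peak if peak > 1.0 else buffer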
    def from_file(file_path: str) -> Project:
        with open(file_path, "rb") as f:
            return Project.from_json(msgpack.unpackb(f.read()))

    def from_json(json: dict) -> Project:
        return Project(
        project = Project(
            version = json["version"],
            time_signature = TimeSignature(json["time_signature"]["beats_per_measure"], json["time_signature"]["note_value"]),
            bpm = json["bpm"],
            channels = [ProjectChannel.from_json(channel) for channel in json["channels"]]
            song_length = json["song_length"],
            sample_rate = json["sample_rate"]
        )

        project.channels = [ProjectChannel.from_json(channel, project) for channel in json["channels"]]

        return project

    def to_json(self):
        return {
            "version": self.version,
            "time_signature": asdict(self.time_signature),
            "bpm": self.bpm,
            "channels": [channel.to_json() for channel in self.channels]
            "channels": [channel.to_json() for channel in self.channels],
            "song_length": self.song_length,
            "sample_rate": self.sample_rate
        }

    def write_to_file(self, file_path: str):
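write_to_file's body is cut off by the diff, but from_file shows the format: the to_json dict packed with msgpack. The msgpack_numpy import is what lets the raw numpy audio_data arrays survive the trip; a sketch of the presumed round trip (the project may call msgpack_numpy.patch() instead of passing hooks explicitly):

    import msgpack
    import msgpack_numpy
    import numpy as np

    data = {"audio_data": np.zeros((2, 4), dtype=np.float32)}
    packed = msgpack.packb(data, default=msgpack_numpy.encode)
    restored = msgpack.unpackb(packed, object_hook=msgpack_numpy.decode)
    assert np.array_equal(data["audio_data"], restored["audio_data"])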
src/song_player.py | 13 (deleted)
@@ -1,13 +0,0 @@
from project import Project
import pedalboard
import sounddevice as sd
import numpy as np
from textual.app import App


class SongPlayer:
    def __init__(self, app: App):
        self.app = app

    def play_song(self, project: Project):
        pass
src/ui/app.py
@@ -8,22 +8,17 @@ from ui.widgets.channel import Channel

from project import ProjectChannel

from song_player import SongPlayer


class AppUI(App):
    CSS_PATH = "../assets/style.tcss"

    theme = "tokyo-night"
    #ENABLE_COMMAND_PALETTE = False

    def __init__(self, project):
        super().__init__()
        self.zoom_level = 0.05
        self.last_zoom_level = self.zoom_level
        self.project = project

        self.song_player = SongPlayer(self)

    def create_channel(self, name: str):
        self.query_one("#channels").mount(Channel(
@@ -36,9 +31,6 @@ class AppUI(App):
            name
        ))

    def on_mount(self):
        self.song_player.play_song(self.app.project)

    def compose(self) -> ComposeResult:
        with Tabs(id="top-menu"):
            yield Tab("File")
src/ui/widgets/channel.py
@@ -54,6 +54,14 @@ class Channel(VerticalGroup):
        self.solo = solo
        self.pan = pan
        self.volume = volume

    def on_checkbox_changed(self, event: Checkbox.Changed):
        if event.checkbox.id == "mute":
            self.muted = event.value

            self.app.query_one("#timeline").query_one()
        elif event.checkbox.id == "solo":
            self.solo = event.value

    def on_slider_changed(self, event: Slider.Changed):
        if event.slider.id == "volume":
@@ -2,6 +2,8 @@ from textual.containers import Horizontal
from textual.app import ComposeResult
from textual.widgets import Button, Input, Static

import sounddevice as sd


class ProjectSettings(Horizontal):
    DEFAULT_CSS = """
@@ -37,6 +39,10 @@ class ProjectSettings(Horizontal):
    def __init__(self):
        super().__init__()
        self.border_title = "Project"

    def on_button_pressed(self, event: Button.Pressed):
        if event.button.id == "play-button":
            sd.play(self.app.project.render())

    def compose(self) -> ComposeResult:
        yield Button("▶", tooltip="Play song", flat=True, id="play-button", variant="success")  # icon becomes "⏸" when song is playing
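This sd.play call is the headline of the commit: Project.render returns a (frames, channels) float array, which is exactly the layout sounddevice expects. One caveat, sketched below as a suggestion rather than what the code does: no samplerate is passed, so sounddevice falls back to the output device's default, and playback will be pitched if that default isn't 44100 Hz.

    import sounddevice as sd

    def play_project(project):
        # Passing the project's rate explicitly keeps playback at pitch
        # on devices whose default rate differs from project.sample_rate.
        sd.play(project.render(), samplerate=project.sample_rate)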
@@ -14,17 +14,27 @@ class TimelineRow(Horizontal):
        background: $surface-lighten-1;
        height: 8;
        margin-bottom: 1;
        width: 100;

        &.-muted {
            background: $error 25%;
        }

        &.-solo {
            background: $warning 25%;
        }
    }
    """

class Timeline(Vertical):
    DEFAULT_CSS = """
    Timeline {
        overflow-x: auto;

        #rows {
            hatch: "-" $surface-lighten-1;
            padding: 0 0;
            overflow-x: auto;

        .beat-line {
            color: $surface-lighten-1;
@@ -58,6 +68,9 @@ class Timeline(Vertical):
    def calc_bar_offset(self):
        self.bar_offset = self.app.project.bpm / 8 * (0.03333333333 / self.app.zoom_level)

        for row in self.query(TimelineRow):
            row.styles.width = self.bar_offset * self.app.project.song_length

    @on(events.MouseScrollDown)
    async def mouse_scroll_down(self, event: events.MouseScrollDown):
        self.app.zoom_level += (self.app.scroll_sensitivity_x / 200)
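For a feel for calc_bar_offset: the 0.03333333333 constant is 1/30, so at the defaults (bpm=120, zoom_level=0.05) the math works out to roughly 10 terminal cells per bar:

    bar_offset = 120 / 8 * ((1 / 30) / 0.05)   # 15 * 0.667 = 10.0 cells per bar
    row_width = bar_offset * 16                # 160 cells wide for a 16-bar song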
@@ -93,18 +106,17 @@

        with VerticalScroll(id="rows"):

            for channel in self.app.project.channels:
                with TimelineRow():
                with TimelineRow() as row:
                    row.styles.width = self.bar_offset * self.app.project.song_length

                    for chunk in channel.chunks:
                        if chunk.chunk_type == ChunkType.CHUNK:
                            yield Chunk(chunk_name=chunk.name, bar_pos=chunk.position)
                        elif chunk.chunk_type == ChunkType.AUDIO:
                            yield AudioChunk(chunk.audio_data, chunk.sample_rate, chunk.name, chunk.position)

            for i in range(1, 17):
            for i in range(1, self.app.project.song_length):
                bar = None
                if i % 4 == 0:
                    bar = Rule.vertical(classes="bar-line", line_style="double")