testing multiple tracks
@@ -7,12 +7,12 @@ if __name__ == "__main__":
     print("Loading project...")
     test_project = Project(channels=[
         ProjectChannel(chunks=[
-            AudioChannelChunk(*librosa.load("120 bpm amen break.mp3"), position=0),
-            AudioChannelChunk(*librosa.load("120 bpm amen break.mp3"), position=1),
-            AudioChannelChunk(*librosa.load("120 bpm amen break.mp3"), position=2)
+            AudioChannelChunk(*librosa.load("120 bpm amen break.mp3", mono=False), position=0, name="120 bpm amen break.mp3"),
+            AudioChannelChunk(*librosa.load("120 bpm amen break.mp3", mono=False), position=1, name="120 bpm amen break.mp3"),
+            AudioChannelChunk(*librosa.load("120 bpm amen break.mp3", mono=False), position=2, name="120 bpm amen break.mp3")
         ], name="drums"),
         ProjectChannel(chunks=[
-
+            AudioChannelChunk(*librosa.load("piano chords - Bmin 120BPM.wav", mono=False), name="piano chords - Bmin 120BPM.wav")
         ], name="piano")
     ])#.from_file("test_project.tdp")
 
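For context on the new arguments: `librosa.load` returns a `(samples, sample_rate)` tuple, and passing `mono=False` keeps the channel dimension instead of downmixing, so a stereo file comes back with shape `(2, n)`. A minimal sketch of what the updated calls produce; the `AudioChannelChunk` argument order shown in the comment is inferred from this diff, not confirmed elsewhere:

```python
import librosa

# mono=False preserves channels; stereo audio arrives with shape (2, n_samples).
# librosa resamples to 22050 Hz by default unless sr=None is passed.
audio, sr = librosa.load("120 bpm amen break.mp3", mono=False)
print(audio.shape, sr)

# The *-unpacking above therefore expands to roughly:
# AudioChannelChunk(audio, sr, position=0, name="120 bpm amen break.mp3")
```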
BIN  src/piano chords - Bmin 120BPM.wav  (Normal file; binary file not shown)
@@ -9,8 +9,6 @@ from ui.widgets.project_settings import ProjectSettings
 class AppUI(App):
     CSS_PATH = "../assets/style.tcss"
 
-    theme = "atom-one-dark"
-
     def __init__(self, project):
         super().__init__()
         self.zoom_level = 0.05
@@ -14,7 +14,7 @@ from ui.widgets.chunk_types.chunk import Chunk
 class AudioChunk(Chunk):
     DEFAULT_CSS = """
     AudioChunk {
-        border: tab $accent;
+        border: tab $secondary;
         PlotWidget {
             height: 1fr;
 
@@ -51,7 +51,7 @@ class AudioChunk(Chunk):
     def on_mount(self):
         for plot in self.query(PlotWidget):
             plot: PlotWidget = plot  # just for type checking
 
             plot.margin_top = 0
             plot.margin_left = 0
             plot.margin_bottom = 0
@@ -91,7 +91,7 @@ class AudioChunk(Chunk):
                 x,
                 y,
                 1.0,
-                bar_style=self.app.theme_variables["warning"],
+                bar_style=self.app.theme_variables["secondary"],
                 hires_mode=HiResMode.BRAILLE
             )
 
@@ -114,8 +114,6 @@ class AudioChunk(Chunk):
                     samples.append(self.audio[channel, sample])"""
 
                 yield PlotWidget(allow_pan_and_zoom=False, id=f"channel-{channel}")
 
-                #yield Sparkline(data=samples)
-
 
         else:
@@ -125,6 +123,4 @@ class AudioChunk(Chunk):
             for sample in range(0, self.num_samples, int(self.sample_rate*0.1)):
                 samples.append(self.audio[sample])"""
 
             yield PlotWidget(allow_pan_and_zoom=False)
-
-            #yield Sparkline(data=samples)
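The commented-out loops deleted in the two hunks above decimated the audio to roughly ten points per second before handing it to a `Sparkline`. A rough sketch of that decimation, kept only as an illustration of what the dead code did (the helper name is made up; `PlotWidget` now handles the rendering):

```python
import numpy as np

def downsample_for_display(audio: np.ndarray, sample_rate: int, step_s: float = 0.1) -> np.ndarray:
    """Keep one sample every step_s seconds, mirroring the removed comment block."""
    step = max(1, int(sample_rate * step_s))
    return audio[..., ::step]  # works for mono (n,) and multichannel (channels, n)

samples = downsample_for_display(np.zeros(44_100), 44_100)
print(samples.shape)  # (10,): ten points per second of audio
```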
@@ -80,6 +80,11 @@ class Timeline(Vertical):
         for bar_line in self.query(Rule):
             if not isinstance(bar_line, PlayHead):
                 bar_line.offset = (self.bar_offset * bar_line.index, 0)
 
+                if self.app.zoom_level >= 0.09 and bar_line.has_class("beat-line"):
+                    bar_line.display = False
+                else:
+                    bar_line.display = True
+
     def compose(self) -> ComposeResult:
 
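The new branch hides beat lines once `zoom_level` reaches 0.09 while leaving other bar lines untouched. Restated as a standalone predicate, purely for illustration (the helper is hypothetical; the app starts at `zoom_level = 0.05`, see the `AppUI` hunk above):

```python
def beat_line_visible(zoom_level: float, is_beat_line: bool) -> bool:
    # Mirrors the added condition: beat lines disappear at zoom_level >= 0.09.
    return not (zoom_level >= 0.09 and is_beat_line)

assert beat_line_visible(0.05, True)        # default zoom keeps beat lines visible
assert not beat_line_visible(0.09, True)    # at the threshold they are hidden
assert beat_line_visible(0.12, False)       # non-beat bar lines always stay visible
```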
@@ -98,7 +103,7 @@ class Timeline(Vertical):
                     yield Chunk(chunk_name=chunk.name, bar_pos=chunk.position)
                 elif chunk.chunk_type == ChunkType.AUDIO:
                     yield AudioChunk(chunk.audio_data, chunk.sample_rate, chunk.name, chunk.position)
 
         for i in range(1, 17):
             bar = None
             if i % 4 == 0:
@@ -113,4 +118,4 @@ class Timeline(Vertical):
 
 
 
-        #yield PlayHead()
+        yield PlayHead()