From 9d2397b5ee218b0baa8d262205f3c9cc619cac95 Mon Sep 17 00:00:00 2001
From: SatyrDiamond
Date: Fri, 9 Jun 2023 20:34:10 -0400
Subject: [PATCH] update + audio warp/stretch

---
 DawVert.py | 9 +-
 LICENSE | 0
 README.md | 0
 config_vst.py | 0
 config_vst_windows.py | 0
 data_idvals/adlib_rol_inst.csv | 0
 data_idvals/beepbox_inst.csv | 0
 data_idvals/boscaceoil_drumkit_midi.csv | 0
 data_idvals/boscaceoil_drumkit_simple.csv | 0
 data_idvals/boscaceoil_drumkit_sion.csv | 0
 data_idvals/boscaceoil_inst.csv | 0
 data_idvals/caustic_inst.csv | 0
 data_idvals/mariopaint_inst.csv | 0
 data_idvals/midi_ctrl.csv | 0
 data_idvals/midi_drumkit.csv | 0
 data_idvals/midi_inst.csv | 0
 data_idvals/midi_inst_drums.csv | 0
 data_idvals/midi_inst_group.csv | 0
 data_idvals/names_gmmidi.csv | 0
 data_idvals/names_gmmidi_drums.csv | 0
 data_idvals/noteblockstudio_inst.csv | 0
 data_idvals/notessimo_v2_inst.csv | 0
 data_idvals/notessimo_v3_inst.csv | 0
 data_idvals/notessimo_v3_inst_group.csv | 0
 data_idvals/onlineseq_drumkit.csv | 0
 data_idvals/onlineseq_drumkit_2013.csv | 0
 data_idvals/onlineseq_drumkit_808.csv | 0
 data_idvals/onlineseq_drumkit_909.csv | 0
 data_idvals/onlineseq_drumkit_retro.csv | 0
 data_idvals/onlineseq_inst.csv | 0
 data_idvals/orgyana_inst_drums.csv | 0
 data_idvals/soundclub2_inst.csv | 0
 dawvert_cmd.py | 0
 docs/dawvert.svg | 0
 docs/experiments.md | 0
 docs/input_plugins.md | 0
 docs/vsts.md | 0
 experiments_plugin_input/__init__.py | 0
 experiments_plugin_input/r_basicpitch.py | 0
 experiments_plugin_input/r_color_art.py | 0
 functions/audio.py | 49 +++-
 functions/audio_wav.py | 0
 functions/auto.py | 0
 functions/colors.py | 0
 functions/core.py | 10 +-
 functions/data_bytes.py | 0
 functions/data_values.py | 58 ++++-
 functions/folder_samples.py | 0
 functions/format_caustic.py | 0
 functions/format_flp_dec.py | 72 +++---
 functions/format_flp_enc.py | 4 +-
 functions/format_midi_in.py | 0
 functions/format_midi_out.py | 0
 functions/idvals.py | 0
 functions/list_vst.py | 0
 functions/midi_exdata.py | 0
 functions/note_convert.py | 0
 functions/note_data.py | 0
 functions/note_mod.py | 0
 functions/notelist_data.py | 0
 functions/params_vst.py | 0
 functions/placement_data.py | 39 ---
 functions/placements.py | 0
 functions/plug_conv.py | 0
 functions/plugin_vst2.py | 0
 functions/song.py | 0
 functions/song_compat.py | 235 +++++++++++++++---
 functions/song_convert.py | 4 +
 functions/song_tracker.py | 0
 functions/tracks.py | 0
 functions/xtramath.py | 0
 functions_plugconv/input_audiosauna.py | 0
 functions_plugconv/input_flstudio.py | 0
 functions_plugconv/input_flstudio_wrapper.py | 0
 functions_plugconv/input_jummbox.py | 0
 functions_plugconv/input_pxtone.py | 0
 functions_plugconv/input_soundchip.py | 0
 functions_plugconv/output_audiosauna_vst2.py | 0
 functions_plugconv/output_flstudio_vst2.py | 0
 functions_plugconv/output_lmms_vst2.py | 0
 .../output_multisampler_vst2.py | 0
 .../output_namco163_famistudio_vst2.py | 0
 functions_plugconv/output_onlineseq_vst2.py | 0
 functions_plugconv/output_piyopiyo_vst2.py | 0
 functions_plugconv/output_retro_vst2.py | 0
 functions_plugconv/output_sampler_vst2.py | 0
 functions_plugconv/output_simple_vst2.py | 0
 functions_plugconv/output_slicer_vst2.py | 0
 functions_plugconv/output_soundchip_vst2.py | 0
 functions_plugin/lmms_auto.py | 0
 functions_plugparams/data_nullbytegroup.py | 0
 functions_plugparams/data_vc2xml.py | 0
 functions_plugparams/file_vst2.py | 0
 functions_plugparams/params_drops.py | 0
 functions_plugparams/params_grace.py | 0
 functions_plugparams/params_kickmess.py | 0
functions_plugparams/params_ninjas2.py | 0 functions_plugparams/params_oxe_fm.py | 0 functions_plugparams/params_various_fx.py | 0 functions_plugparams/params_various_inst.py | 0 functions_plugparams/params_vital.py | 0 .../params_vital_wavetable.py | 0 plugin_input/__init__.py | 0 plugin_input/ableton.py | 107 ++++---- plugin_input/cvpj_m.py | 0 plugin_input/cvpj_mi.py | 0 plugin_input/cvpj_r.py | 0 plugin_input/m_adlib_rol.py | 0 plugin_input/m_lovelycomposer.py | 0 plugin_input/m_mariopaint_msq.py | 0 plugin_input/m_mariopaint_mss.py | 0 plugin_input/m_mariopaint_smp.py | 0 plugin_input/m_mc_noteblock_studio.py | 0 plugin_input/m_midi.py | 0 plugin_input/m_pxtone.py | 0 plugin_input/m_smaf.py | 0 plugin_input/mi_boscaceoil.py | 0 plugin_input/mi_deflemask.py | 0 plugin_input/mi_famistudiotxt.py | 0 plugin_input/mi_famitrackertxt.py | 0 plugin_input/mi_flp.py | 66 +++-- plugin_input/mi_jummbox.py | 0 plugin_input/mi_notessimo_v2.py | 0 plugin_input/mi_notessimo_v3.py | 0 plugin_input/mi_pixitracker.py | 0 plugin_input/mi_tracker_it.py | 0 plugin_input/mi_tracker_mod.py | 0 plugin_input/mi_tracker_s3m.py | 0 plugin_input/mi_tracker_umx.py | 0 plugin_input/mi_tracker_xm.py | 0 plugin_input/mi_trackerboy.py | 0 plugin_input/r_audiosauna.py | 0 plugin_input/r_dawproject.py | 0 plugin_input/r_flipperzero.py | 0 plugin_input/r_lmms.py | 0 plugin_input/r_mekimeki.py | 0 plugin_input/r_onlineseq.py | 0 plugin_input/r_orgyana.py | 0 plugin_input/r_petaporon.py | 0 plugin_input/r_piyopiyo.py | 0 plugin_input/r_soundation.py | 0 plugin_input/r_wavtool.py | 12 +- plugin_input/ri_caustic.py | 0 plugin_input/ri_soundclub2.py | 0 plugin_output/__init__.py | 0 plugin_output/ableton.py | 114 ++++----- plugin_output/cvpj.py | 0 plugin_output/cvpj_m.py | 0 plugin_output/cvpj_mi.py | 0 plugin_output/daw_flp.py | 88 +++---- plugin_output/daw_lmms.py | 0 plugin_output/dawproject.py | 0 plugin_output/midi.py | 0 plugin_output/muse.py | 0 plugin_output/reaper.py | 0 requirements.txt | 3 +- 156 files changed, 541 insertions(+), 329 deletions(-) mode change 100755 => 100644 LICENSE mode change 100755 => 100644 README.md mode change 100755 => 100644 config_vst.py mode change 100755 => 100644 config_vst_windows.py mode change 100755 => 100644 data_idvals/adlib_rol_inst.csv mode change 100755 => 100644 data_idvals/beepbox_inst.csv mode change 100755 => 100644 data_idvals/boscaceoil_drumkit_midi.csv mode change 100755 => 100644 data_idvals/boscaceoil_drumkit_simple.csv mode change 100755 => 100644 data_idvals/boscaceoil_drumkit_sion.csv mode change 100755 => 100644 data_idvals/boscaceoil_inst.csv mode change 100755 => 100644 data_idvals/caustic_inst.csv mode change 100755 => 100644 data_idvals/mariopaint_inst.csv mode change 100755 => 100644 data_idvals/midi_ctrl.csv mode change 100755 => 100644 data_idvals/midi_drumkit.csv mode change 100755 => 100644 data_idvals/midi_inst.csv mode change 100755 => 100644 data_idvals/midi_inst_drums.csv mode change 100755 => 100644 data_idvals/midi_inst_group.csv mode change 100755 => 100644 data_idvals/names_gmmidi.csv mode change 100755 => 100644 data_idvals/names_gmmidi_drums.csv mode change 100755 => 100644 data_idvals/noteblockstudio_inst.csv mode change 100755 => 100644 data_idvals/notessimo_v2_inst.csv mode change 100755 => 100644 data_idvals/notessimo_v3_inst.csv mode change 100755 => 100644 data_idvals/notessimo_v3_inst_group.csv mode change 100755 => 100644 data_idvals/onlineseq_drumkit.csv mode change 100755 => 100644 data_idvals/onlineseq_drumkit_2013.csv mode change 
100755 => 100644 data_idvals/onlineseq_drumkit_808.csv mode change 100755 => 100644 data_idvals/onlineseq_drumkit_909.csv mode change 100755 => 100644 data_idvals/onlineseq_drumkit_retro.csv mode change 100755 => 100644 data_idvals/onlineseq_inst.csv mode change 100755 => 100644 data_idvals/orgyana_inst_drums.csv mode change 100755 => 100644 data_idvals/soundclub2_inst.csv mode change 100755 => 100644 dawvert_cmd.py mode change 100755 => 100644 docs/dawvert.svg mode change 100755 => 100644 docs/experiments.md mode change 100755 => 100644 docs/input_plugins.md mode change 100755 => 100644 docs/vsts.md mode change 100755 => 100644 experiments_plugin_input/__init__.py mode change 100755 => 100644 experiments_plugin_input/r_basicpitch.py mode change 100755 => 100644 experiments_plugin_input/r_color_art.py mode change 100755 => 100644 functions/audio.py mode change 100755 => 100644 functions/audio_wav.py mode change 100755 => 100644 functions/auto.py mode change 100755 => 100644 functions/colors.py mode change 100755 => 100644 functions/core.py mode change 100755 => 100644 functions/data_bytes.py mode change 100755 => 100644 functions/data_values.py mode change 100755 => 100644 functions/folder_samples.py mode change 100755 => 100644 functions/format_caustic.py mode change 100755 => 100644 functions/format_flp_enc.py mode change 100755 => 100644 functions/format_midi_in.py mode change 100755 => 100644 functions/format_midi_out.py mode change 100755 => 100644 functions/idvals.py mode change 100755 => 100644 functions/list_vst.py mode change 100755 => 100644 functions/midi_exdata.py mode change 100755 => 100644 functions/note_convert.py mode change 100755 => 100644 functions/note_data.py mode change 100755 => 100644 functions/note_mod.py mode change 100755 => 100644 functions/notelist_data.py mode change 100755 => 100644 functions/params_vst.py mode change 100755 => 100644 functions/placement_data.py mode change 100755 => 100644 functions/placements.py mode change 100755 => 100644 functions/plug_conv.py mode change 100755 => 100644 functions/plugin_vst2.py mode change 100755 => 100644 functions/song.py mode change 100755 => 100644 functions/song_compat.py mode change 100755 => 100644 functions/song_convert.py mode change 100755 => 100644 functions/song_tracker.py mode change 100755 => 100644 functions/tracks.py mode change 100755 => 100644 functions/xtramath.py mode change 100755 => 100644 functions_plugconv/input_audiosauna.py mode change 100755 => 100644 functions_plugconv/input_flstudio.py mode change 100755 => 100644 functions_plugconv/input_flstudio_wrapper.py mode change 100755 => 100644 functions_plugconv/input_jummbox.py mode change 100755 => 100644 functions_plugconv/input_pxtone.py mode change 100755 => 100644 functions_plugconv/input_soundchip.py mode change 100755 => 100644 functions_plugconv/output_audiosauna_vst2.py mode change 100755 => 100644 functions_plugconv/output_flstudio_vst2.py mode change 100755 => 100644 functions_plugconv/output_lmms_vst2.py mode change 100755 => 100644 functions_plugconv/output_multisampler_vst2.py mode change 100755 => 100644 functions_plugconv/output_namco163_famistudio_vst2.py mode change 100755 => 100644 functions_plugconv/output_onlineseq_vst2.py mode change 100755 => 100644 functions_plugconv/output_piyopiyo_vst2.py mode change 100755 => 100644 functions_plugconv/output_retro_vst2.py mode change 100755 => 100644 functions_plugconv/output_sampler_vst2.py mode change 100755 => 100644 functions_plugconv/output_simple_vst2.py mode change 100755 => 
100644 functions_plugconv/output_slicer_vst2.py mode change 100755 => 100644 functions_plugconv/output_soundchip_vst2.py mode change 100755 => 100644 functions_plugin/lmms_auto.py mode change 100755 => 100644 functions_plugparams/data_nullbytegroup.py mode change 100755 => 100644 functions_plugparams/data_vc2xml.py mode change 100755 => 100644 functions_plugparams/file_vst2.py mode change 100755 => 100644 functions_plugparams/params_drops.py mode change 100755 => 100644 functions_plugparams/params_grace.py mode change 100755 => 100644 functions_plugparams/params_kickmess.py mode change 100755 => 100644 functions_plugparams/params_ninjas2.py mode change 100755 => 100644 functions_plugparams/params_oxe_fm.py mode change 100755 => 100644 functions_plugparams/params_various_fx.py mode change 100755 => 100644 functions_plugparams/params_various_inst.py mode change 100755 => 100644 functions_plugparams/params_vital.py mode change 100755 => 100644 functions_plugparams/params_vital_wavetable.py mode change 100755 => 100644 plugin_input/__init__.py mode change 100755 => 100644 plugin_input/ableton.py mode change 100755 => 100644 plugin_input/cvpj_m.py mode change 100755 => 100644 plugin_input/cvpj_mi.py mode change 100755 => 100644 plugin_input/cvpj_r.py mode change 100755 => 100644 plugin_input/m_adlib_rol.py mode change 100755 => 100644 plugin_input/m_lovelycomposer.py mode change 100755 => 100644 plugin_input/m_mariopaint_msq.py mode change 100755 => 100644 plugin_input/m_mariopaint_mss.py mode change 100755 => 100644 plugin_input/m_mariopaint_smp.py mode change 100755 => 100644 plugin_input/m_mc_noteblock_studio.py mode change 100755 => 100644 plugin_input/m_midi.py mode change 100755 => 100644 plugin_input/m_pxtone.py mode change 100755 => 100644 plugin_input/m_smaf.py mode change 100755 => 100644 plugin_input/mi_boscaceoil.py mode change 100755 => 100644 plugin_input/mi_deflemask.py mode change 100755 => 100644 plugin_input/mi_famistudiotxt.py mode change 100755 => 100644 plugin_input/mi_famitrackertxt.py mode change 100755 => 100644 plugin_input/mi_flp.py mode change 100755 => 100644 plugin_input/mi_jummbox.py mode change 100755 => 100644 plugin_input/mi_notessimo_v2.py mode change 100755 => 100644 plugin_input/mi_notessimo_v3.py mode change 100755 => 100644 plugin_input/mi_pixitracker.py mode change 100755 => 100644 plugin_input/mi_tracker_it.py mode change 100755 => 100644 plugin_input/mi_tracker_mod.py mode change 100755 => 100644 plugin_input/mi_tracker_s3m.py mode change 100755 => 100644 plugin_input/mi_tracker_umx.py mode change 100755 => 100644 plugin_input/mi_tracker_xm.py mode change 100755 => 100644 plugin_input/mi_trackerboy.py mode change 100755 => 100644 plugin_input/r_audiosauna.py mode change 100755 => 100644 plugin_input/r_dawproject.py mode change 100755 => 100644 plugin_input/r_flipperzero.py mode change 100755 => 100644 plugin_input/r_lmms.py mode change 100755 => 100644 plugin_input/r_mekimeki.py mode change 100755 => 100644 plugin_input/r_onlineseq.py mode change 100755 => 100644 plugin_input/r_orgyana.py mode change 100755 => 100644 plugin_input/r_petaporon.py mode change 100755 => 100644 plugin_input/r_piyopiyo.py mode change 100755 => 100644 plugin_input/r_soundation.py mode change 100755 => 100644 plugin_input/r_wavtool.py mode change 100755 => 100644 plugin_input/ri_caustic.py mode change 100755 => 100644 plugin_input/ri_soundclub2.py mode change 100755 => 100644 plugin_output/__init__.py mode change 100755 => 100644 plugin_output/ableton.py mode change 100755 => 
100644 plugin_output/cvpj.py mode change 100755 => 100644 plugin_output/cvpj_m.py mode change 100755 => 100644 plugin_output/cvpj_mi.py mode change 100755 => 100644 plugin_output/daw_flp.py mode change 100755 => 100644 plugin_output/daw_lmms.py mode change 100755 => 100644 plugin_output/dawproject.py mode change 100755 => 100644 plugin_output/midi.py mode change 100755 => 100644 plugin_output/muse.py mode change 100755 => 100644 plugin_output/reaper.py mode change 100755 => 100644 requirements.txt diff --git a/DawVert.py b/DawVert.py index 2e3bde2a..7a87468f 100644 --- a/DawVert.py +++ b/DawVert.py @@ -253,6 +253,10 @@ if out_type != 'debug': CVPJ_j = song_compat.makecompat(CVPJ_j, in_type, in_dawcapabilities, out_dawcapabilities) +if in_type in ['r', 'm']: CVPJ_j = song_compat.makecompat_audiostretch(CVPJ_j, in_type, in_dawcapabilities, out_dawcapabilities) + +CVPJ_j = song_compat.makecompat_any(CVPJ_j, in_type, in_dawcapabilities, out_dawcapabilities) + if in_type == 'ri' and out_type == 'mi': CVPJ_j = song_convert.ri2mi(CVPJ_j) if in_type == 'ri' and out_type == 'r': CVPJ_j = song_convert.ri2r(CVPJ_j) @@ -269,11 +273,12 @@ CVPJ_j = song_convert.mi2m(CVPJ_j, extra_json) CVPJ_j = song_convert.m2r(CVPJ_j) - if out_type != 'debug': CVPJ_j = song_compat.makecompat(CVPJ_j, out_type, in_dawcapabilities, out_dawcapabilities) -CVPJ_j = song_compat.makecompat_any(CVPJ_j, out_type, in_dawcapabilities, out_dawcapabilities) +if out_type in ['r', 'm']: CVPJ_j = song_compat.makecompat_audiostretch(CVPJ_j, out_type, in_dawcapabilities, out_dawcapabilities) + +#CVPJ_j = song_compat.makecompat_any(CVPJ_j, out_type, in_dawcapabilities, out_dawcapabilities) # ------------------------------------------------------------------------------------------------------------------------------------------ # ------------------------------------------------------------------------------------------------------------------------------------------ diff --git a/LICENSE b/LICENSE old mode 100755 new mode 100644 diff --git a/README.md b/README.md old mode 100755 new mode 100644 diff --git a/config_vst.py b/config_vst.py old mode 100755 new mode 100644 diff --git a/config_vst_windows.py b/config_vst_windows.py old mode 100755 new mode 100644 diff --git a/data_idvals/adlib_rol_inst.csv b/data_idvals/adlib_rol_inst.csv old mode 100755 new mode 100644 diff --git a/data_idvals/beepbox_inst.csv b/data_idvals/beepbox_inst.csv old mode 100755 new mode 100644 diff --git a/data_idvals/boscaceoil_drumkit_midi.csv b/data_idvals/boscaceoil_drumkit_midi.csv old mode 100755 new mode 100644 diff --git a/data_idvals/boscaceoil_drumkit_simple.csv b/data_idvals/boscaceoil_drumkit_simple.csv old mode 100755 new mode 100644 diff --git a/data_idvals/boscaceoil_drumkit_sion.csv b/data_idvals/boscaceoil_drumkit_sion.csv old mode 100755 new mode 100644 diff --git a/data_idvals/boscaceoil_inst.csv b/data_idvals/boscaceoil_inst.csv old mode 100755 new mode 100644 diff --git a/data_idvals/caustic_inst.csv b/data_idvals/caustic_inst.csv old mode 100755 new mode 100644 diff --git a/data_idvals/mariopaint_inst.csv b/data_idvals/mariopaint_inst.csv old mode 100755 new mode 100644 diff --git a/data_idvals/midi_ctrl.csv b/data_idvals/midi_ctrl.csv old mode 100755 new mode 100644 diff --git a/data_idvals/midi_drumkit.csv b/data_idvals/midi_drumkit.csv old mode 100755 new mode 100644 diff --git a/data_idvals/midi_inst.csv b/data_idvals/midi_inst.csv old mode 100755 new mode 100644 diff --git a/data_idvals/midi_inst_drums.csv b/data_idvals/midi_inst_drums.csv 
old mode 100755 new mode 100644 diff --git a/data_idvals/midi_inst_group.csv b/data_idvals/midi_inst_group.csv old mode 100755 new mode 100644 diff --git a/data_idvals/names_gmmidi.csv b/data_idvals/names_gmmidi.csv old mode 100755 new mode 100644 diff --git a/data_idvals/names_gmmidi_drums.csv b/data_idvals/names_gmmidi_drums.csv old mode 100755 new mode 100644 diff --git a/data_idvals/noteblockstudio_inst.csv b/data_idvals/noteblockstudio_inst.csv old mode 100755 new mode 100644 diff --git a/data_idvals/notessimo_v2_inst.csv b/data_idvals/notessimo_v2_inst.csv old mode 100755 new mode 100644 diff --git a/data_idvals/notessimo_v3_inst.csv b/data_idvals/notessimo_v3_inst.csv old mode 100755 new mode 100644 diff --git a/data_idvals/notessimo_v3_inst_group.csv b/data_idvals/notessimo_v3_inst_group.csv old mode 100755 new mode 100644 diff --git a/data_idvals/onlineseq_drumkit.csv b/data_idvals/onlineseq_drumkit.csv old mode 100755 new mode 100644 diff --git a/data_idvals/onlineseq_drumkit_2013.csv b/data_idvals/onlineseq_drumkit_2013.csv old mode 100755 new mode 100644 diff --git a/data_idvals/onlineseq_drumkit_808.csv b/data_idvals/onlineseq_drumkit_808.csv old mode 100755 new mode 100644 diff --git a/data_idvals/onlineseq_drumkit_909.csv b/data_idvals/onlineseq_drumkit_909.csv old mode 100755 new mode 100644 diff --git a/data_idvals/onlineseq_drumkit_retro.csv b/data_idvals/onlineseq_drumkit_retro.csv old mode 100755 new mode 100644 diff --git a/data_idvals/onlineseq_inst.csv b/data_idvals/onlineseq_inst.csv old mode 100755 new mode 100644 diff --git a/data_idvals/orgyana_inst_drums.csv b/data_idvals/orgyana_inst_drums.csv old mode 100755 new mode 100644 diff --git a/data_idvals/soundclub2_inst.csv b/data_idvals/soundclub2_inst.csv old mode 100755 new mode 100644 diff --git a/dawvert_cmd.py b/dawvert_cmd.py old mode 100755 new mode 100644 diff --git a/docs/dawvert.svg b/docs/dawvert.svg old mode 100755 new mode 100644 diff --git a/docs/experiments.md b/docs/experiments.md old mode 100755 new mode 100644 diff --git a/docs/input_plugins.md b/docs/input_plugins.md old mode 100755 new mode 100644 diff --git a/docs/vsts.md b/docs/vsts.md old mode 100755 new mode 100644 diff --git a/experiments_plugin_input/__init__.py b/experiments_plugin_input/__init__.py old mode 100755 new mode 100644 diff --git a/experiments_plugin_input/r_basicpitch.py b/experiments_plugin_input/r_basicpitch.py old mode 100755 new mode 100644 diff --git a/experiments_plugin_input/r_color_art.py b/experiments_plugin_input/r_color_art.py old mode 100755 new mode 100644 diff --git a/functions/audio.py b/functions/audio.py old mode 100755 new mode 100644 index 8370cb19..1a6eb265 --- a/functions/audio.py +++ b/functions/audio.py @@ -3,17 +3,38 @@ import av import os +import configparser +from os.path import exists +from tinydb import TinyDB, Query + +audioinfo_cache_filepath = './__config/cache_audioinfo.db' +db = TinyDB(audioinfo_cache_filepath) +samplesdb = Query() def get_audiofile_info(sample_filename): audio_path = '' audio_filesize = 0 - audio_crc = 0 audio_moddate = 0 - audio_duration = 0 + audio_duration = 5 audio_timebase = 44100 audio_hz = 44100 - if os.path.exists(sample_filename): + db_searchfound = db.search(samplesdb.path == sample_filename) + + out_data = {} + out_data['path'] = sample_filename + out_data['file_size'] = 0 + out_data['mod_date'] = 0 + out_data['dur'] = 1 + out_data['crc'] = 0 + out_data['audio_timebase'] = 44100 + out_data['rate'] = 44100 + out_data['dur_sec'] = 1 + + if db_searchfound != []: + 
out_data = db_searchfound[0]
+
+    elif os.path.exists(sample_filename):
         avdata = av.open(sample_filename)
         audio_path = sample_filename
         audio_filesize = os.path.getsize(sample_filename)
@@ -24,13 +45,17 @@ def get_audiofile_info(sample_filename):
         audio_hz_b = avdata.streams.audio[0].rate
         if audio_hz_b != None: audio_hz = audio_hz_b
 
-    out_data = {}
-    out_data['path'] = audio_path
-    out_data['file_size'] = audio_filesize
-    out_data['crc'] = audio_crc
-    out_data['mod_date'] = audio_moddate
-    out_data['dur'] = audio_duration
-    out_data['audio_timebase'] = audio_timebase
-    out_data['rate'] = audio_hz
-    out_data['dur_sec'] = (audio_duration/audio_timebase)
+        if db.search(samplesdb.path == audio_path) == []:
+            out_db_data = {}
+            out_db_data['path'] = audio_path
+            out_db_data['file_size'] = audio_filesize
+            out_db_data['mod_date'] = audio_moddate
+            out_db_data['dur'] = audio_duration
+            out_db_data['crc'] = 0
+            out_db_data['audio_timebase'] = audio_timebase
+            out_db_data['rate'] = audio_hz
+            out_db_data['dur_sec'] = (audio_duration/audio_timebase)
+            db.insert(out_db_data)
+            out_data = out_db_data
+
     return out_data
diff --git a/functions/audio_wav.py b/functions/audio_wav.py
old mode 100755
new mode 100644
diff --git a/functions/auto.py b/functions/auto.py
old mode 100755
new mode 100644
diff --git a/functions/colors.py b/functions/colors.py
old mode 100755
new mode 100644
diff --git a/functions/core.py b/functions/core.py
old mode 100755
new mode 100644
index e01833f6..bb26a215
--- a/functions/core.py
+++ b/functions/core.py
@@ -174,8 +174,9 @@ def convert_type_output(extra_json):
     if out_type != 'debug': convproj_j[0] = song_compat.makecompat(convproj_j[0], in_type, in_dawcapabilities, out_dawcapabilities)
 
-    if in_type == 'r':
-        convproj_j[0] = song_compat.makecompat_any(convproj_j[0], in_type, in_dawcapabilities, out_dawcapabilities)
+    if in_type in ['r', 'm']: convproj_j[0] = song_compat.makecompat_audiostretch(convproj_j[0], in_type, in_dawcapabilities, out_dawcapabilities)
+
+    convproj_j[0] = song_compat.makecompat_any(convproj_j[0], in_type, in_dawcapabilities, out_dawcapabilities)
 
     if in_type == 'ri' and out_type == 'mi': convproj_j[0] = song_convert.ri2mi(convproj_j[0])
     if in_type == 'ri' and out_type == 'r': convproj_j[0] = song_convert.ri2r(convproj_j[0])
@@ -196,9 +197,8 @@ def convert_type_output(extra_json):
     if out_type != 'debug': convproj_j[0] = song_compat.makecompat(convproj_j[0], out_type, in_dawcapabilities, out_dawcapabilities)
 
-    if in_type != 'r':
-        convproj_j[0] = song_compat.makecompat_any(convproj_j[0], out_type, in_dawcapabilities, out_dawcapabilities)
-
+    if out_type in ['r', 'm']: convproj_j[0] = song_compat.makecompat_audiostretch(convproj_j[0], out_type, in_dawcapabilities, out_dawcapabilities)
+
     convproj_j[1] = currentplug_output[3]
     convproj_j[2] = currentplug_output[4]
diff --git a/functions/data_bytes.py b/functions/data_bytes.py
old mode 100755
new mode 100644
diff --git a/functions/data_values.py b/functions/data_values.py
old mode 100755
new mode 100644
index 8c26dda2..4cc3b333
--- a/functions/data_values.py
+++ b/functions/data_values.py
@@ -61,4 +61,60 @@ def sort_pos(datapart):
 
 def list_chunks(i_list, i_amount):
-    return [i_list[i:i + i_amount] for i in range(0, len(i_list), i_amount)]
\ No newline at end of file
+    return [i_list[i:i + i_amount] for i in range(0, len(i_list), i_amount)]
+
+
+
+
+
+
+def tempo_to_rate(i_in, i_mode):
+    if i_mode == True: return (120/i_in)
+    if i_mode == False: return (i_in/120)
+
+def time_from_steps(i_dict, i_name, i_stretched,
i_value, i_rate): + in_bpm = 1 + in_stretch = 1 + + if i_rate != None: in_stretch = i_rate + + if i_stretched == False: + out_nonstretch = i_value + out_normal = i_value*in_stretch + i_dict[i_name+'_nonstretch'] = out_nonstretch + i_dict[i_name] = out_normal + else: + out_nonstretch = i_value/in_stretch + out_normal = i_value + i_dict[i_name+'_nonstretch'] = i_value/in_stretch + i_dict[i_name] = i_value + + out_real_nonstretch = (out_nonstretch/8)*in_bpm + out_real = out_real_nonstretch/in_stretch + + i_dict[i_name+'_real_nonstretch'] = out_real_nonstretch + i_dict[i_name+'_real'] = out_real + +def time_from_seconds(i_dict, i_name, i_stretched, i_value, i_rate): + in_bpm = 1 + in_stretch = 1 + + if i_rate != None: in_stretch = i_rate + + if i_stretched == False: + out_real_nonstretch = i_value + out_real = out_real_nonstretch/in_stretch + i_dict[i_name+'_real_nonstretch'] = out_real_nonstretch + i_dict[i_name+'_real'] = out_real + + else: + out_real = i_value + out_real_nonstretch = out_real*in_stretch + i_dict[i_name+'_real_nonstretch'] = out_real_nonstretch + i_dict[i_name+'_real'] = out_real + + out_nonstretch = (out_real_nonstretch*8)*in_bpm + out_normal = out_nonstretch*in_stretch + + i_dict[i_name+'_nonstretch'] = out_nonstretch + i_dict[i_name] = out_normal diff --git a/functions/folder_samples.py b/functions/folder_samples.py old mode 100755 new mode 100644 diff --git a/functions/format_caustic.py b/functions/format_caustic.py old mode 100755 new mode 100644 diff --git a/functions/format_flp_dec.py b/functions/format_flp_dec.py index e2fca848..ea853d88 100644 --- a/functions/format_flp_dec.py +++ b/functions/format_flp_dec.py @@ -23,9 +23,14 @@ def calctempotimed(i_value): #print('VALUE', str(i_value).ljust(20), '| MUL', str(i_tempomul).ljust(20), '| OUT', str(i_out).ljust(20)) return i_out +def decodetext(event_data): + return event_data.decode('utf-16le').rstrip('\x00\x00') + # ------------- parse ------------- def parse_arrangement(arrdata): + #print(FLSplitted) + bio_fldata = create_bytesio(arrdata) output = [] while bio_fldata[0].tell() < bio_fldata[1]: @@ -38,34 +43,26 @@ def parse_arrangement(arrdata): placement['trackindex'] = int.from_bytes(bio_fldata[0].read(4), "little") placement['unknown1'] = int.from_bytes(bio_fldata[0].read(2), "little") placement['flags'] = int.from_bytes(bio_fldata[0].read(2), "little") - placement['unknown2'] = int.from_bytes(bio_fldata[0].read(2), "little") - placement['unknown3'] = int.from_bytes(bio_fldata[0].read(2), "little") - if FLSplitted[0] == '21': - placement['unknown4'] = bio_fldata[0].read(28) + placement['unknown2'] = int.from_bytes(bio_fldata[0].read(4), "little") startoffset_bytes = bio_fldata[0].read(4) endoffset_bytes = bio_fldata[0].read(4) if FLSplitted[0] == '21': - startoffset_bytes = placement['unknown4'][0:4] - endoffset_bytes = placement['unknown4'][4:8] - + placement['unknown3'] = bio_fldata[0].read(28) + startoffset = int.from_bytes(startoffset_bytes, "little") endoffset = int.from_bytes(endoffset_bytes, "little") startoffset_float = struct.unpack(' placement['patternbase']: - if startoffset != 4294967295 and startoffset != 3212836864: placement['startoffset'] = startoffset - if endoffset != 4294967295 and endoffset != 3212836864: placement['endoffset'] = endoffset + if placement['itemindex'] > placement['patternbase']: + if startoffset != 4294967295 and startoffset != 3212836864: placement['startoffset'] = startoffset + if endoffset != 4294967295 and endoffset != 3212836864: placement['endoffset'] = endoffset else: - if 
placement['itemindex'] > placement['patternbase']: - if startoffset != 4294967295 and startoffset != 3212836864: placement['startoffset'] = startoffset - if endoffset != 4294967295 and endoffset != 3212836864: placement['endoffset'] = endoffset - else: - if startoffset_float > 0: placement['startoffset'] = calctempotimed(startoffset_float) - if endoffset_float > 0: placement['endoffset'] = calctempotimed(endoffset_float) + #print(placement['length'], startoffset_float, endoffset_float) + if startoffset_float > 0: placement['startoffset'] = calctempotimed(startoffset_float) + if endoffset_float > 0: placement['endoffset'] = calctempotimed(endoffset_float) output.append(placement) return output @@ -175,6 +172,7 @@ def parse_flevent(datastream): def parse(inputfile): global FL_Main global FLSplitted + fileobject = open(inputfile, 'rb') headername = fileobject.read(4) rifftable = data_bytes.riff_read(fileobject, 0) @@ -221,6 +219,10 @@ def parse(inputfile): FL_FXCreationMode = 0 FL_TimeMarkers = {} FL_ChanGroupName = [] + T_FL_CurrentArrangement = '0' + FL_Arrangements[T_FL_CurrentArrangement] = {} + FL_Arrangements[T_FL_CurrentArrangement]['tracks'] = {} + FL_Arrangements[T_FL_CurrentArrangement]['items'] = {} T_FL_FXNum = -1 for event in eventtable: @@ -230,7 +232,7 @@ def parse(inputfile): if event_id == 199: FLVersion = event_data.decode('utf-8').rstrip('\x00') FLSplitted = FLVersion.split('.') - if int(FLSplitted[0]) < 20: + if int(FLSplitted[0]) < 12: print('[error] FL version '+FLSplitted[0]+' is not supported.') exit() FL_Main['Version'] = FLVersion @@ -239,15 +241,15 @@ def parse(inputfile): if event_id == 17: FL_Main['Numerator'] = event_data if event_id == 18: FL_Main['Denominator'] = event_data if event_id == 11: FL_Main['Shuffle'] = event_data - if event_id == 194: FL_Main['Title'] = event_data.decode('utf-16le').rstrip('\x00\x00') - if event_id == 206: FL_Main['Genre'] = event_data.decode('utf-16le').rstrip('\x00\x00') - if event_id == 207: FL_Main['Author'] = event_data.decode('utf-16le').rstrip('\x00\x00') - if event_id == 202: FL_Main['ProjectDataPath'] = event_data.decode('utf-16le').rstrip('\x00\x00') - if event_id == 195: FL_Main['Comment'] = event_data.decode('utf-16le').rstrip('\x00\x00') - if event_id == 197: FL_Main['URL'] = event_data.decode('utf-16le').rstrip('\x00\x00') + if event_id == 194: FL_Main['Title'] = decodetext(event_data) + if event_id == 206: FL_Main['Genre'] = decodetext(event_data) + if event_id == 207: FL_Main['Author'] = decodetext(event_data) + if event_id == 202: FL_Main['ProjectDataPath'] = decodetext(event_data) + if event_id == 195: FL_Main['Comment'] = decodetext(event_data) + if event_id == 197: FL_Main['URL'] = decodetext(event_data) if event_id == 237: FL_Main['ProjectTime'] = event_data if event_id == 10: FL_Main['ShowInfo'] = event_data - if event_id == 231: FL_ChanGroupName.append(event_data.decode('utf-16le').rstrip('\x00\x00')) + if event_id == 231: FL_ChanGroupName.append(decodetext(event_data)) if event_id == 65: T_FL_CurrentPattern = event_data @@ -296,7 +298,7 @@ def parse(inputfile): if event_data != 5328737: FL_Patterns[str(T_FL_CurrentPattern)]['color'] = event_data if event_id == 193: - FL_Patterns[str(T_FL_CurrentPattern)]['name'] = event_data.decode('utf-16le').rstrip('\x00\x00') + FL_Patterns[str(T_FL_CurrentPattern)]['name'] = decodetext(event_data) if event_id == 99: T_FL_CurrentArrangement = event_data @@ -308,7 +310,7 @@ def parse(inputfile): FL_TimeMarkers = FL_Arrangements[str(T_FL_CurrentArrangement)]['timemarkers'] 
TimeMarker_id = 0 if event_id == 241: - FL_Arrangements[str(T_FL_CurrentArrangement)]['name'] = event_data.decode('utf-16le').rstrip('\x00\x00') + FL_Arrangements[str(T_FL_CurrentArrangement)]['name'] = decodetext(event_data) if event_id == 233: playlistitems = parse_arrangement(event_data) FL_Arrangements[str(T_FL_CurrentArrangement)]['items'] = playlistitems @@ -322,7 +324,7 @@ def parse(inputfile): if event_id == 239: #PLTrackName if str(currenttracknum) not in FL_Tracks: FL_Tracks[str(currenttracknum)] = {} - FL_Tracks[str(currenttracknum)]['name'] = event_data.decode('utf-16le').rstrip('\x00\x00') + FL_Tracks[str(currenttracknum)]['name'] = decodetext(event_data) if event_id == 148: @@ -335,7 +337,7 @@ def parse(inputfile): FL_TimeMarkers[str(T_FL_CurrentTimeMarker)]['type'] = timemarkertype FL_TimeMarkers[str(T_FL_CurrentTimeMarker)]['pos'] = timemarkertime if event_id == 205: - event_text = event_data.decode('utf-16le').rstrip('\x00\x00') + event_text = decodetext(event_data) #print('\\__TimeMarkerName:', event_text) FL_TimeMarkers[str(T_FL_CurrentTimeMarker)]['name'] = event_text if event_id == 33: @@ -364,7 +366,7 @@ def parse(inputfile): T_FL_FXIcon = None if FL_FXCreationMode == 0: if event_id == 201: - event_text = event_data.decode('utf-16le').rstrip('\x00\x00') + event_text = decodetext(event_data) #print('\\__DefPluginName:', event_text) DefPluginName = event_text if event_id == 212: @@ -374,7 +376,7 @@ def parse(inputfile): #print(event_data) if event_id == 203: - event_text = event_data.decode('utf-16le').rstrip('\x00\x00') + event_text = decodetext(event_data) #print('\\__PluginName:', event_text) FL_Channels[str(T_FL_CurrentChannel)]['name'] = event_text if event_id == 155: @@ -416,7 +418,7 @@ def parse(inputfile): if event_id == 20: FL_Channels[str(T_FL_CurrentChannel)]['looptype'] = event_data if event_id == 135: FL_Channels[str(T_FL_CurrentChannel)]['middlenote'] = event_data if event_id == 196: - samplefilename = event_data.decode('utf-16le').rstrip('\x00\x00') + samplefilename = decodetext(event_data) if samplefilename[:21] == '%FLStudioFactoryData%': samplefilename = "C:\\Program Files\\Image-Line\\FL Studio 20" + samplefilename[21:] FL_Channels[str(T_FL_CurrentChannel)]['samplefilename'] = samplefilename @@ -439,7 +441,7 @@ def parse(inputfile): T_FL_FXColor = None T_FL_FXIcon = None if event_id == 201: - event_text = event_data.decode('utf-16le').rstrip('\x00\x00') + event_text = decodetext(event_data) #print('\\__DefPluginName:', event_text) DefPluginName = event_text if event_id == 212: @@ -449,7 +451,7 @@ def parse(inputfile): FXPlugin['data'] = event_data if event_id == 155: FXPlugin['icon'] = event_data if event_id == 128: FXPlugin['color'] = event_data - if event_id == 203: FXPlugin['name'] = event_data.decode('utf-16le').rstrip('\x00\x00') + if event_id == 203: FXPlugin['name'] = decodetext(event_data) if event_id == 98: #FXToSlotNum FL_Mixer[str(T_FL_FXNum)]['slots'][event_data] = FXPlugin FXPlugin = None @@ -458,7 +460,7 @@ def parse(inputfile): if event_id == 154: FL_Mixer[str(T_FL_FXNum)]['inchannum'] = event_data if event_id == 147: FL_Mixer[str(T_FL_FXNum)]['outchannum'] = event_data if event_id == 204: - event_text = event_data.decode('utf-16le').rstrip('\x00\x00') + event_text = decodetext(event_data) FL_Mixer[str(T_FL_FXNum+1)]['name'] = event_text output = {} diff --git a/functions/format_flp_enc.py b/functions/format_flp_enc.py old mode 100755 new mode 100644 index 33726342..9e18ae50 --- a/functions/format_flp_enc.py +++ 
b/functions/format_flp_enc.py @@ -57,6 +57,7 @@ def make_arrangement(data_FLdt, arrangements): BytesIO_arrangement.write(item['unknown2'].to_bytes(2, 'little')) BytesIO_arrangement.write(item['unknown3'].to_bytes(2, 'little')) + if int(item['itemindex']) > item['patternbase']: if 'startoffset' in item: BytesIO_arrangement.write(item['startoffset'].to_bytes(4, 'little')) else: BytesIO_arrangement.write(b'\xff\xff\xff\xff') @@ -69,11 +70,12 @@ def make_arrangement(data_FLdt, arrangements): if 'startoffset' in item: startoffset_out = calctempotimed(item['startoffset']) if 'endoffset' in item: endoffset_out = calctempotimed(item['endoffset']) + #print(item['length'], startoffset_out, endoffset_out) + BytesIO_arrangement.write(struct.pack(' 0: plus_offset += minuswarppos + for t_warpmarker in t_warpmarkers: + t_warpmarker['pos'] -= minuswarppos + + #print(minus_offset, plus_offset) + + audio_info = audio.get_audiofile_info(cvpj_placement['file']) + audio_dur_sec_steps = audio_info['dur_sec']*8 + + if 'stretch_algorithm' in old_audiomod: new_audiomod['stretch_algorithm'] = old_audiomod['stretch_algorithm'] + + if len(t_warpmarkers) >= 2: + t_warpmarker_last = t_warpmarkers[-1] + new_audiomod['stretch_method'] = 'rate_ignoretempo' + audiorate = (1/((t_warpmarker_last['pos']/8)/t_warpmarkers[-1]['pos_real'])) + new_audiomod['stretch_data']['rate'] = audiorate + + cvpj_placement['audiomod'] = new_audiomod + + if 'cut' in cvpj_placement: + cutdata = cvpj_placement['cut'] + + if audiorate != 1: + if cutdata['type'] == 'loop': + data_values.time_from_steps(cutdata, 'start', True, cutdata['start']+minus_offset, audiorate) + data_values.time_from_steps(cutdata, 'loopstart', True, cutdata['loopstart']+minus_offset, audiorate) + data_values.time_from_steps(cutdata, 'loopend', True, cutdata['loopend']+minus_offset, audiorate ) + cvpj_placement['position'] += plus_offset + cvpj_placement['duration'] -= plus_offset + cvpj_placement['duration'] += minus_offset + + if cutdata['type'] == 'cut': + data_values.time_from_steps(cutdata, 'start', True, cutdata['start']+minus_offset, (1/audiorate)*tempomul ) + data_values.time_from_steps(cutdata, 'end', True, cutdata['end']+minus_offset-plus_offset, (1/audiorate)*tempomul ) + + return cvpj_placements + +def rate2warp(cvpj_placements, tempo): + new_placements = [] + tempomul = (120/tempo) + + for cvpj_placement in cvpj_placements: + audiorate = 1 + ratetempo = 1 + + if 'audiomod' in cvpj_placement: + old_audiomod = cvpj_placement['audiomod'] + new_audiomod = {} + + if 'stretch_method' in old_audiomod: + if old_audiomod['stretch_method'] == 'rate_ignoretempo': + + audio_info = audio.get_audiofile_info(cvpj_placement['file']) + audio_dur_sec = audio_info['dur_sec'] + + t_stretch_data = old_audiomod['stretch_data'] + + new_audiomod = {} + new_audiomod['stretch_method'] = 'warp' + new_audiomod['stretch_algorithm'] = 'stretch' + if 'stretch_algorithm' in old_audiomod: new_audiomod['stretch_algorithm'] = old_audiomod['stretch_algorithm'] + if 'pitch' in old_audiomod: new_audiomod['pitch'] = old_audiomod['pitch'] + + audiorate = t_stretch_data['rate'] + ratetempo = 1/(audiorate/tempomul) + + #for value in [audiorate, tempomul, audiorate/tempomul]: + # print(str(value).ljust(20), end=' ') + #print() + + new_audiomod['stretch_data'] = [ + {'pos': 0.0, 'pos_real': 0.0}, + {'pos': audio_dur_sec*8, 'pos_real': (audio_dur_sec*audiorate)} + ] + + cvpj_placement['audiomod'] = new_audiomod + + + if 'cut' in cvpj_placement: + cutdata = cvpj_placement['cut'] + if cutdata['type'] == 
'cut':
+                if 'start' not in cutdata: data_values.time_from_steps(cutdata, 'start', True, 0, audiorate)
+                data_values.time_from_seconds(cutdata, 'start', False, cutdata['start_real_nonstretch']*ratetempo, 1)
+                data_values.time_from_seconds(cutdata, 'end', False, cutdata['end_real_nonstretch']*ratetempo, 1)
+
+    return cvpj_placements
+
+def r_changestretch(projJ, stretchtype):
+    tempo = projJ['bpm']
+    if 'track_placements' in projJ:
+        for track_placements_id in projJ['track_placements']:
+            track_placements_data = projJ['track_placements'][track_placements_id]
+            not_laned = True
+            if 'laned' in track_placements_data:
+                print('[compat] warp2rate: laned: '+track_placements_id)
+                if track_placements_data['laned'] == 1:
+                    not_laned = False
+                    s_lanedata = track_placements_data['lanedata']
+                    s_laneordering = track_placements_data['laneorder']
+                    for t_lanedata in s_lanedata:
+                        tj_lanedata = s_lanedata[t_lanedata]
+                        if 'audio' in tj_lanedata:
+                            if stretchtype == 'rate':
+                                print('[compat] warp2rate: laned: '+track_placements_id)
+                                tj_lanedata['audio'] = warp2rate(tj_lanedata['audio'], tempo)
+                            if stretchtype == 'warp':
+                                print('[compat] rate2warp: laned: '+track_placements_id)
+                                tj_lanedata['audio'] = rate2warp(tj_lanedata['audio'], tempo)
+
+            if not_laned == True:
+                if 'audio' in track_placements_data:
+                    if stretchtype == 'rate':
+                        print('[compat] warp2rate: non-laned: '+track_placements_id)
+                        track_placements_data['audio'] = warp2rate(track_placements_data['audio'], tempo)
+                    if stretchtype == 'warp':
+                        print('[compat] rate2warp: non-laned: '+track_placements_id)
+                        track_placements_data['audio'] = rate2warp(track_placements_data['audio'], tempo)
+
+
+def m_changestretch(projJ, stretchtype):
+    tempo = projJ['bpm']
+    for playlist_id in projJ['playlist']:
+        playlist_id_data = projJ['playlist'][playlist_id]
+        if 'placements_audio' in playlist_id_data:
+            if stretchtype == 'rate': playlist_id_data['placements_audio'] = warp2rate(playlist_id_data['placements_audio'], tempo)
+            if stretchtype == 'warp': playlist_id_data['placements_audio'] = rate2warp(playlist_id_data['placements_audio'], tempo)
 
 # -------------------------------------------- track_lanes --------------------------------------------
@@ -605,6 +759,28 @@ def beats_to_seconds(cvpj_l):
 
 # -------------------------------------------- Main --------------------------------------------
+
+audiostretch_processed = False
+
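+# Converts audio placement stretching between 'warp' (warp-marker based) and
+# 'rate', based on the 'placement_audio_stretch' capability lists reported by
+# the input and output plugins; the audiostretch_processed flag keeps the
+# conversion from being applied more than once per project.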
+def makecompat_audiostretch(cvpj_l, cvpj_type, in_dawcapabilities, out_dawcapabilities):
+    cvpj_proj = json.loads(cvpj_l)
+    global audiostretch_processed
+    if audiostretch_processed == False and cvpj_type in ['r', 'm']:
+        in__placement_audio_stretch = []
+        out__placement_audio_stretch = []
+        if 'placement_audio_stretch' in in_dawcapabilities: in__placement_audio_stretch = in_dawcapabilities['placement_audio_stretch']
+        if 'placement_audio_stretch' in out_dawcapabilities: out__placement_audio_stretch = out_dawcapabilities['placement_audio_stretch']
+        if 'warp' in in__placement_audio_stretch and 'warp' not in out__placement_audio_stretch:
+            if cvpj_type == 'm': m_changestretch(cvpj_proj, 'rate')
+            if cvpj_type == 'r': r_changestretch(cvpj_proj, 'rate')
+            audiostretch_processed = True
+
+        if 'rate' in in__placement_audio_stretch and 'warp' in out__placement_audio_stretch:
+            if cvpj_type == 'm': m_changestretch(cvpj_proj, 'warp')
+            if cvpj_type == 'r': r_changestretch(cvpj_proj, 'warp')
+            audiostretch_processed = True
+    return json.dumps(cvpj_proj)
+
 def makecompat_any(cvpj_l, cvpj_type, in_dawcapabilities, out_dawcapabilities):
     cvpj_proj = json.loads(cvpj_l)
@@ -623,13 +799,14 @@ def makecompat_any(cvpj_l, cvpj_type, in_dawcapabilities, out_dawcapabilities):
     if 'time_seconds' in in_dawcapabilities: in__time_seconds = in_dawcapabilities['time_seconds']
     if 'time_seconds' in out_dawcapabilities: out__time_seconds = out_dawcapabilities['time_seconds']
 
-    print('[compat] '+str(in__fxrack).ljust(5)+' | '+str(out__fxrack).ljust(5)+' | fxrack')
     print('[compat] '+str(in__auto_nopl).ljust(5)+' | '+str(out__auto_nopl).ljust(5)+' | auto_nopl')
+    print('[compat] '+str(in__fxrack).ljust(5)+' | '+str(out__fxrack).ljust(5)+' | fxrack')
     print('[compat] '+str(in__time_seconds).ljust(5)+' | '+str(out__time_seconds).ljust(5)+' | time_seconds')
 
     if in__fxrack == False and out__fxrack == True: trackfx2fxrack(cvpj_proj, cvpj_type)
     if in__auto_nopl == False and out__auto_nopl == True: remove_auto_placements(cvpj_proj)
     if in__time_seconds == False and out__time_seconds == True: beats_to_seconds(cvpj_proj)
+
     return json.dumps(cvpj_proj)
 
 r_processed = False
@@ -673,16 +850,16 @@ def makecompat(cvpj_l, cvpj_type, in_dawcapabilities, out_dawcapabilities):
     if 'placement_audio_events' in out_dawcapabilities: out__placement_audio_events = out_dawcapabilities['placement_audio_events']
 
     if isprinted == False:
-        print('[compat] '+str(in__track_lanes).ljust(5)+' | '+str(out__track_lanes).ljust(5)+' | track_lanes')
+        print('[compat] '+str(in__placement_audio_events).ljust(5)+' | '+str(out__placement_audio_events).ljust(5)+' | placement_audio_events')
         print('[compat] '+str(in__placement_cut).ljust(5)+' | '+str(out__placement_cut).ljust(5)+' | placement_cut')
         print('[compat] '+str(in__placement_loop).ljust(5)+' | '+str(out__placement_loop).ljust(5)+' | placement_loop')
+        print('[compat] '+str(in__track_lanes).ljust(5)+' | '+str(out__track_lanes).ljust(5)+' | track_lanes')
         print('[compat] '+str(in__track_nopl).ljust(5)+' | '+str(out__track_nopl).ljust(5)+' | track_nopl')
-        print('[compat] '+str(in__placement_audio_events).ljust(5)+' | '+str(out__placement_audio_events).ljust(5)+' | placement_audio_events')
         isprinted = True
 
-    if cvpj_type == 'm' and m_processed == False:
-        if in__placement_loop == True and out__placement_loop == False: m_removeloops(cvpj_proj)
-        m_processed = True
+    #if cvpj_type == 'm' and m_processed == False:
+    #    if in__placement_loop == False and out__placement_loop == True: r_addloops(cvpj_proj)
+    #    m_processed = True
 
     if cvpj_type == 'mi' and mi_processed == False:
         if in__placement_loop == True and out__placement_loop == False: m_removeloops(cvpj_proj)
diff --git a/functions/song_convert.py b/functions/song_convert.py
old mode 100755
new mode 100644
index eee8bc2c..465f2318
--- a/functions/song_convert.py
+++ b/functions/song_convert.py
@@ -474,6 +474,7 @@ def m2mi_checkdup(cvpj_notelistindex, nledata):
     return None
 
 m2mi_sample_names = ['file', 'name', 'color', 'audiomod', 'vol', 'pan', 'fxrack_channel']
+m2mi_notes_names = ['notelist', 'name', 'color']
 
 def m2mi(song):
     print('[song-convert] Converting from Multiple > MultipleIndexed')
@@ -495,9 +496,12 @@ def m2mi(song):
             if checksamenl != None: cvpj_placement['fromindex'] = checksamenl
             else:
                 cvpj_notelistindex['m2mi_' + str(pattern_number)] = temp_nle
+                if 'color' in cvpj_placement: temp_nle['color'] = cvpj_placement['color']
+                if 'name' in cvpj_placement: temp_nle['name'] = cvpj_placement['name']
                 cvpj_placement['fromindex'] = 'm2mi_' + str(pattern_number)
                 del cvpj_placement['notelist']
                 pattern_number += 1
+
     cvpj_proj['notelistindex'] = cvpj_notelistindex
 
     sample_number = 1
diff --git a/functions/song_tracker.py
b/functions/song_tracker.py old mode 100755 new mode 100644 diff --git a/functions/tracks.py b/functions/tracks.py old mode 100755 new mode 100644 diff --git a/functions/xtramath.py b/functions/xtramath.py old mode 100755 new mode 100644 diff --git a/functions_plugconv/input_audiosauna.py b/functions_plugconv/input_audiosauna.py old mode 100755 new mode 100644 diff --git a/functions_plugconv/input_flstudio.py b/functions_plugconv/input_flstudio.py old mode 100755 new mode 100644 diff --git a/functions_plugconv/input_flstudio_wrapper.py b/functions_plugconv/input_flstudio_wrapper.py old mode 100755 new mode 100644 diff --git a/functions_plugconv/input_jummbox.py b/functions_plugconv/input_jummbox.py old mode 100755 new mode 100644 diff --git a/functions_plugconv/input_pxtone.py b/functions_plugconv/input_pxtone.py old mode 100755 new mode 100644 diff --git a/functions_plugconv/input_soundchip.py b/functions_plugconv/input_soundchip.py old mode 100755 new mode 100644 diff --git a/functions_plugconv/output_audiosauna_vst2.py b/functions_plugconv/output_audiosauna_vst2.py old mode 100755 new mode 100644 diff --git a/functions_plugconv/output_flstudio_vst2.py b/functions_plugconv/output_flstudio_vst2.py old mode 100755 new mode 100644 diff --git a/functions_plugconv/output_lmms_vst2.py b/functions_plugconv/output_lmms_vst2.py old mode 100755 new mode 100644 diff --git a/functions_plugconv/output_multisampler_vst2.py b/functions_plugconv/output_multisampler_vst2.py old mode 100755 new mode 100644 diff --git a/functions_plugconv/output_namco163_famistudio_vst2.py b/functions_plugconv/output_namco163_famistudio_vst2.py old mode 100755 new mode 100644 diff --git a/functions_plugconv/output_onlineseq_vst2.py b/functions_plugconv/output_onlineseq_vst2.py old mode 100755 new mode 100644 diff --git a/functions_plugconv/output_piyopiyo_vst2.py b/functions_plugconv/output_piyopiyo_vst2.py old mode 100755 new mode 100644 diff --git a/functions_plugconv/output_retro_vst2.py b/functions_plugconv/output_retro_vst2.py old mode 100755 new mode 100644 diff --git a/functions_plugconv/output_sampler_vst2.py b/functions_plugconv/output_sampler_vst2.py old mode 100755 new mode 100644 diff --git a/functions_plugconv/output_simple_vst2.py b/functions_plugconv/output_simple_vst2.py old mode 100755 new mode 100644 diff --git a/functions_plugconv/output_slicer_vst2.py b/functions_plugconv/output_slicer_vst2.py old mode 100755 new mode 100644 diff --git a/functions_plugconv/output_soundchip_vst2.py b/functions_plugconv/output_soundchip_vst2.py old mode 100755 new mode 100644 diff --git a/functions_plugin/lmms_auto.py b/functions_plugin/lmms_auto.py old mode 100755 new mode 100644 diff --git a/functions_plugparams/data_nullbytegroup.py b/functions_plugparams/data_nullbytegroup.py old mode 100755 new mode 100644 diff --git a/functions_plugparams/data_vc2xml.py b/functions_plugparams/data_vc2xml.py old mode 100755 new mode 100644 diff --git a/functions_plugparams/file_vst2.py b/functions_plugparams/file_vst2.py old mode 100755 new mode 100644 diff --git a/functions_plugparams/params_drops.py b/functions_plugparams/params_drops.py old mode 100755 new mode 100644 diff --git a/functions_plugparams/params_grace.py b/functions_plugparams/params_grace.py old mode 100755 new mode 100644 diff --git a/functions_plugparams/params_kickmess.py b/functions_plugparams/params_kickmess.py old mode 100755 new mode 100644 diff --git a/functions_plugparams/params_ninjas2.py b/functions_plugparams/params_ninjas2.py old mode 100755 new mode 
100644 diff --git a/functions_plugparams/params_oxe_fm.py b/functions_plugparams/params_oxe_fm.py old mode 100755 new mode 100644 diff --git a/functions_plugparams/params_various_fx.py b/functions_plugparams/params_various_fx.py old mode 100755 new mode 100644 diff --git a/functions_plugparams/params_various_inst.py b/functions_plugparams/params_various_inst.py old mode 100755 new mode 100644 diff --git a/functions_plugparams/params_vital.py b/functions_plugparams/params_vital.py old mode 100755 new mode 100644 diff --git a/functions_plugparams/params_vital_wavetable.py b/functions_plugparams/params_vital_wavetable.py old mode 100755 new mode 100644 diff --git a/plugin_input/__init__.py b/plugin_input/__init__.py old mode 100755 new mode 100644 diff --git a/plugin_input/ableton.py b/plugin_input/ableton.py old mode 100755 new mode 100644 index 9edc7f61..fb0d8c33 --- a/plugin_input/ableton.py +++ b/plugin_input/ableton.py @@ -5,7 +5,7 @@ from functions import tracks from functions import colors from functions import audio -from functions import placement_data +from functions import data_values import xml.etree.ElementTree as ET import plugin_input @@ -54,13 +54,9 @@ def gettype(self): return 'r' def supported_autodetect(self): return False def getdawcapabilities(self): return { - 'fxrack': False, - 'track_lanes': False, 'placement_cut': True, 'placement_loop': True, - 'track_nopl': False, - 'auto_nopl': False, - 'placement_audio_events': False, + 'placement_audio_stretch': ['warp'], } def parse(self, input_file, extra_param): @@ -202,8 +198,8 @@ def parse(self, input_file, extra_param): for t_note in t_notes: cvpj_placement['notelist'].append(t_notes[t_note]) - #for value in [note_placement_pos, note_placement_dur+note_placement_pos, note_placement_loop_start, note_placement_loop_l_start, note_placement_loop_l_end]: - # print(str(value).ljust(20), end=' ') + #for value in ["CurrentStart", "CurrentEnd", "StartRelative", "LoopStart", "LoopEnd"]: + # print(get_value(x_track_MidiClip, 'CurrentEnd', 0).ljust(20), end=' ') #print() tracks.r_pl_notes(cvpj_l, track_id, cvpj_placement) @@ -273,33 +269,31 @@ def parse(self, input_file, extra_param): cvpj_placement['fade']['out']['slope'] = float(get_value(x_track_AudioClip_fades, 'FadeOutCurveSlope', 0)) cvpj_placement['audiomod'] = {} - cvpj_placement['audiomod']['stretch'] = {} - cvpj_stretch = cvpj_placement['audiomod']['stretch'] + cvpj_audiomod = cvpj_placement['audiomod'] if audio_placement_warp_on == 1: - cvpj_stretch['enabled'] = True - cvpj_stretch['params'] = {} + cvpj_audiomod['stretch_method'] = 'warp' + cvpj_audiomod['stretch_params'] = {} if audio_placement_warp_mode == 0: - cvpj_stretch['mode'] = 'ableton_beats' - cvpj_stretch['params']['TransientResolution'] = int(get_value(x_track_AudioClip, 'TransientResolution', 6)) - cvpj_stretch['params']['TransientLoopMode'] = int(get_value(x_track_AudioClip, 'TransientLoopMode', 2)) - cvpj_stretch['params']['TransientEnvelope'] = int(get_value(x_track_AudioClip, 'TransientEnvelope', 100)) + cvpj_audiomod['stretch_algorithm'] = 'ableton_beats' + cvpj_audiomod['stretch_params']['TransientResolution'] = int(get_value(x_track_AudioClip, 'TransientResolution', 6)) + cvpj_audiomod['stretch_params']['TransientLoopMode'] = int(get_value(x_track_AudioClip, 'TransientLoopMode', 2)) + cvpj_audiomod['stretch_params']['TransientEnvelope'] = int(get_value(x_track_AudioClip, 'TransientEnvelope', 100)) if audio_placement_warp_mode == 1: - cvpj_stretch['mode'] = 'ableton_tones' - 
cvpj_stretch['params']['GranularityTones'] = float(get_value(x_track_AudioClip, 'GranularityTones', 30)) + cvpj_audiomod['stretch_algorithm'] = 'ableton_tones' + cvpj_audiomod['stretch_params']['GranularityTones'] = float(get_value(x_track_AudioClip, 'GranularityTones', 30)) if audio_placement_warp_mode == 2: - cvpj_stretch['mode'] = 'ableton_texture' - cvpj_stretch['params']['GranularityTexture'] = float(get_value(x_track_AudioClip, 'GranularityTexture', 71.328125)) - cvpj_stretch['params']['FluctuationTexture'] = float(get_value(x_track_AudioClip, 'FluctuationTexture', 27.34375)) + cvpj_audiomod['stretch_algorithm'] = 'ableton_texture' + cvpj_audiomod['stretch_params']['GranularityTexture'] = float(get_value(x_track_AudioClip, 'GranularityTexture', 71.328125)) + cvpj_audiomod['stretch_params']['FluctuationTexture'] = float(get_value(x_track_AudioClip, 'FluctuationTexture', 27.34375)) if audio_placement_warp_mode == 3: - cvpj_stretch['mode'] = 'resample' + cvpj_audiomod['stretch_algorithm'] = 'resample' if audio_placement_warp_mode == 4: - cvpj_stretch['mode'] = 'ableton_complex' + cvpj_audiomod['stretch_algorithm'] = 'ableton_complex' if audio_placement_warp_mode == 6: - cvpj_stretch['mode'] = 'stretch_complexpro' - cvpj_stretch['params']['ComplexProFormants'] = float(get_value(x_track_AudioClip, 'ComplexProFormants', 100)) - cvpj_stretch['params']['ComplexProEnvelope'] = int(get_value(x_track_AudioClip, 'ComplexProEnvelope', 120)) - + cvpj_audiomod['stretch_algorithm'] = 'stretch_complexpro' + cvpj_audiomod['stretch_params']['ComplexProFormants'] = float(get_value(x_track_AudioClip, 'ComplexProFormants', 100)) + cvpj_audiomod['stretch_params']['ComplexProEnvelope'] = int(get_value(x_track_AudioClip, 'ComplexProEnvelope', 120)) x_track_AudioClip_WarpMarkers_bef = x_track_AudioClip.findall('WarpMarkers')[0] x_track_AudioClip_WarpMarkers = x_track_AudioClip_WarpMarkers_bef.findall('WarpMarker') @@ -311,54 +305,58 @@ def parse(self, input_file, extra_param): onedur = t_warpmarker['pos_real']/audio_sampleref['seconds'] t_warpmarkers.append(t_warpmarker) - cvpj_stretch['time'] = {} - cvpj_stretch['time']['type'] = 'none' - cvpj_stretch['time']['data'] = {} - - if len(t_warpmarkers) == 2: - t_warpmarker_last = t_warpmarkers[-1] - cvpj_stretch['time']['type'] = 'rate_timed' - audiorate = ((t_warpmarker_last['pos']/8)/t_warpmarker_last['pos_real'])*(120/tempo) - cvpj_stretch['time']['data']['rate'] = audiorate - - if len(t_warpmarkers) >= 3: - del t_warpmarkers[-1] - t_warpmarker_last = t_warpmarkers[-1] - cvpj_stretch['time']['type'] = 'rate_timed' - audiorate = (t_warpmarker_last['pos']/audio_sampleref_steps)*(120/tempo) - cvpj_stretch['time']['data']['rate'] = audiorate + cvpj_audiomod['stretch_data'] = t_warpmarkers + + #cvpj_stretch['time'] = {} + #cvpj_stretch['time']['type'] = 'none' + #cvpj_stretch['time']['data'] = {} + + #if len(t_warpmarkers) == 2: + # t_warpmarker_last = t_warpmarkers[-1] + # cvpj_stretch['time']['type'] = 'rate_timed' + # audiorate = ((t_warpmarker_last['pos']/8)/t_warpmarker_last['pos_real'])*(120/tempo) + # cvpj_stretch['time']['data']['rate'] = audiorate + + #if len(t_warpmarkers) >= 3: + # del t_warpmarkers[-1] + # t_warpmarker_last = t_warpmarkers[-1] + # cvpj_stretch['time']['type'] = 'rate_timed' + # audiorate = (t_warpmarker_last['pos']/audio_sampleref_steps)*(120/tempo) + # cvpj_stretch['time']['data']['rate'] = audiorate #print(cvpj_stretch['time']['data']['rate']) else: - cvpj_stretch['enabled'] = False + cvpj_audiomod['stretch_method'] = None 
audio_placement_PitchCoarse = float(get_value(x_track_AudioClip, 'PitchCoarse', 0)) audio_placement_PitchFine = float(get_value(x_track_AudioClip, 'PitchFine', 0)) - cvpj_stretch['pitch'] = audio_placement_PitchCoarse + audio_placement_PitchFine/100 + cvpj_audiomod['pitch'] = audio_placement_PitchCoarse + audio_placement_PitchFine/100 - #for value in [t_CurrentStart, t_CurrentEnd, audio_placement_loop_start, audio_placement_loop_l_start, audio_placement_loop_l_end]: - # print(str(value).ljust(20), end=' ') + #for value in ["CurrentStart", "CurrentEnd", "StartRelative", "LoopStart", "LoopEnd"]: + # print(str(get_value(x_track_AudioClip, value, 0)).ljust(20), end=' ') #print() if audio_placement_warp_on == False: if audio_placement_loop_on == 0: cvpj_placement['cut'] = {} cvpj_placement['cut']['type'] = 'cut' - placement_data.time_from_seconds(cvpj_placement['cut'], 'start', False, audio_placement_loop_l_start/4, tempo, 1) - placement_data.time_from_seconds(cvpj_placement['cut'], 'end', False, audio_placement_loop_l_end/4, tempo, 1) + data_values.time_from_seconds(cvpj_placement['cut'], 'start', False, audio_placement_loop_l_start/4, 1) + data_values.time_from_seconds(cvpj_placement['cut'], 'end', False, audio_placement_loop_l_end/4, 1) else: if audio_placement_loop_on == 0: cvpj_placement['cut'] = {} cvpj_placement['cut']['type'] = 'cut' - placement_data.time_from_steps(cvpj_placement['cut'], 'start', True, audio_placement_loop_l_start, tempo, audiorate) - placement_data.time_from_steps(cvpj_placement['cut'], 'end', True, audio_placement_loop_l_end, tempo, audiorate) + data_values.time_from_seconds(cvpj_placement['cut'], 'start', True, audio_placement_loop_l_start/8, 1) + data_values.time_from_seconds(cvpj_placement['cut'], 'end', True, audio_placement_loop_l_end/8, 1) else: cvpj_placement['cut'] = {} cvpj_placement['cut']['type'] = 'loop' - placement_data.time_from_steps(cvpj_placement['cut'], 'start', True, audio_placement_loop_start, tempo, audiorate) - placement_data.time_from_steps(cvpj_placement['cut'], 'loopstart', True, audio_placement_loop_l_start, tempo, audiorate) - placement_data.time_from_steps(cvpj_placement['cut'], 'loopend', True, audio_placement_loop_l_end, tempo, audiorate) + data_values.time_from_steps(cvpj_placement['cut'], 'start', False, audio_placement_loop_start, 1) + data_values.time_from_steps(cvpj_placement['cut'], 'loopstart', False, audio_placement_loop_l_start, 1) + data_values.time_from_steps(cvpj_placement['cut'], 'loopend', False, audio_placement_loop_l_end, 1) + + #print(cvpj_placement['cut']) #if 'cut' in cvpj_placement: # print(cvpj_placement['cut']) @@ -370,7 +368,6 @@ def parse(self, input_file, extra_param): sendid = track_sendholder.get('Id') sendlevel = get_param(track_sendholder, 'Send', 'float', 0) tracks.r_add_send(cvpj_l, track_id, 'return_'+str(sendid), sendlevel, None) - sendcount += 1 if tracktype == 'ReturnTrack': diff --git a/plugin_input/cvpj_m.py b/plugin_input/cvpj_m.py old mode 100755 new mode 100644 diff --git a/plugin_input/cvpj_mi.py b/plugin_input/cvpj_mi.py old mode 100755 new mode 100644 diff --git a/plugin_input/cvpj_r.py b/plugin_input/cvpj_r.py old mode 100755 new mode 100644 diff --git a/plugin_input/m_adlib_rol.py b/plugin_input/m_adlib_rol.py old mode 100755 new mode 100644 diff --git a/plugin_input/m_lovelycomposer.py b/plugin_input/m_lovelycomposer.py old mode 100755 new mode 100644 diff --git a/plugin_input/m_mariopaint_msq.py b/plugin_input/m_mariopaint_msq.py old mode 100755 new mode 100644 diff --git 
a/plugin_input/m_mariopaint_mss.py b/plugin_input/m_mariopaint_mss.py old mode 100755 new mode 100644 diff --git a/plugin_input/m_mariopaint_smp.py b/plugin_input/m_mariopaint_smp.py old mode 100755 new mode 100644 diff --git a/plugin_input/m_mc_noteblock_studio.py b/plugin_input/m_mc_noteblock_studio.py old mode 100755 new mode 100644 diff --git a/plugin_input/m_midi.py b/plugin_input/m_midi.py old mode 100755 new mode 100644 diff --git a/plugin_input/m_pxtone.py b/plugin_input/m_pxtone.py old mode 100755 new mode 100644 diff --git a/plugin_input/m_smaf.py b/plugin_input/m_smaf.py old mode 100755 new mode 100644 diff --git a/plugin_input/mi_boscaceoil.py b/plugin_input/mi_boscaceoil.py old mode 100755 new mode 100644 diff --git a/plugin_input/mi_deflemask.py b/plugin_input/mi_deflemask.py old mode 100755 new mode 100644 diff --git a/plugin_input/mi_famistudiotxt.py b/plugin_input/mi_famistudiotxt.py old mode 100755 new mode 100644 diff --git a/plugin_input/mi_famitrackertxt.py b/plugin_input/mi_famitrackertxt.py old mode 100755 new mode 100644 diff --git a/plugin_input/mi_flp.py b/plugin_input/mi_flp.py old mode 100755 new mode 100644 index 565a1bd8..e7fed317 --- a/plugin_input/mi_flp.py +++ b/plugin_input/mi_flp.py @@ -15,7 +15,7 @@ from functions import data_bytes from functions import colors from functions import notelist_data -from functions import placement_data +from functions import data_values from functions import song from functions import audio @@ -48,7 +48,8 @@ def getdawcapabilities(self): return { 'fxrack': True, 'track_lanes': True, - 'placement_cut': True + 'placement_cut': True, + 'placement_audio_stretch': ['rate', 'rate_ignoretempo'] } def supported_autodetect(self): return True def detect(self, input_file): @@ -81,7 +82,6 @@ def parse(self, input_file, extra_param): if 'Tempo' in FL_Main: cvpj_l['bpm'] = FL_Main['Tempo'] else: cvpj_l['bpm'] = 120 if 'Shuffle' in FL_Main: cvpj_l['shuffle'] = FL_Main['Shuffle']/128 - tempomul = cvpj_l['bpm']/120 cvpj_l_instrument_data = {} cvpj_l_instrument_order = [] @@ -149,58 +149,52 @@ def parse(self, input_file, extra_param): if filename_sample in filename_len: ald = filename_len[filename_sample] stretchbpm = (ald['dur_sec']*(cvpj_l['bpm']/120)) - cvpj_s_sample['audiomod'] = {} - cvpj_s_sample['audiomod']['stretch'] = {} - cvpj_s_stretch = cvpj_s_sample['audiomod']['stretch'] + cvpj_audiomod = cvpj_s_sample['audiomod'] = {} t_stretchingmode = 0 t_stretchingtime = 0 t_stretchingmultiplier = 1 t_stretchingpitch = 0 + cvpj_audiomod['stretch_method'] = None if 'stretchingpitch' in channeldata: t_stretchingpitch += channeldata['stretchingpitch']/100 if 'middlenote' in channeldata: t_stretchingpitch += (channeldata['middlenote']-60)*-1 if 'pitch' in channeldata: t_stretchingpitch += channeldata['pitch']/100 - cvpj_s_sample['audiomod']['stretch']['pitch'] = t_stretchingpitch + cvpj_audiomod['pitch'] = t_stretchingpitch if 'stretchingtime' in channeldata: t_stretchingtime = channeldata['stretchingtime']/384 if 'stretchingmode' in channeldata: t_stretchingmode = channeldata['stretchingmode'] if 'stretchingmultiplier' in channeldata: t_stretchingmultiplier = pow(2, channeldata['stretchingmultiplier']/10000) - if t_stretchingmode == -1: cvpj_s_stretch['mode'] = 'stretch' - if t_stretchingmode == 0: cvpj_s_stretch['mode'] = 'resample' - if t_stretchingmode == 1: cvpj_s_stretch['mode'] = 'elastique_v3' - if t_stretchingmode == 2: cvpj_s_stretch['mode'] = 'elastique_v3_mono' - if t_stretchingmode == 3: cvpj_s_stretch['mode'] = 'slice_stretch' - 
if t_stretchingmode == 5: cvpj_s_stretch['mode'] = 'auto' - if t_stretchingmode == 4: cvpj_s_stretch['mode'] = 'slice_map' - if t_stretchingmode == 6: cvpj_s_stretch['mode'] = 'elastique_v2' - if t_stretchingmode == 7: cvpj_s_stretch['mode'] = 'elastique_v2_transient' - if t_stretchingmode == 8: cvpj_s_stretch['mode'] = 'elastique_v2_mono' - if t_stretchingmode == 9: cvpj_s_stretch['mode'] = 'elastique_v2_speech' - - if t_stretchingtime != 0 or t_stretchingmultiplier != 1 or t_stretchingpitch != 0: - cvpj_s_stretch['enabled'] = True + if t_stretchingmode == -1: cvpj_audiomod['stretch_algorithm'] = 'stretch' + if t_stretchingmode == 0: cvpj_audiomod['stretch_algorithm'] = 'resample' + if t_stretchingmode == 1: cvpj_audiomod['stretch_algorithm'] = 'elastique_v3' + if t_stretchingmode == 2: cvpj_audiomod['stretch_algorithm'] = 'elastique_v3_mono' + if t_stretchingmode == 3: cvpj_audiomod['stretch_algorithm'] = 'slice_stretch' + if t_stretchingmode == 5: cvpj_audiomod['stretch_algorithm'] = 'auto' + if t_stretchingmode == 4: cvpj_audiomod['stretch_algorithm'] = 'slice_map' + if t_stretchingmode == 6: cvpj_audiomod['stretch_algorithm'] = 'elastique_v2' + if t_stretchingmode == 7: cvpj_audiomod['stretch_algorithm'] = 'elastique_v2_transient' + if t_stretchingmode == 8: cvpj_audiomod['stretch_algorithm'] = 'elastique_v2_mono' + if t_stretchingmode == 9: cvpj_audiomod['stretch_algorithm'] = 'elastique_v2_speech' + + #if t_stretchingtime != 0 or t_stretchingmultiplier != 1 or t_stretchingpitch != 0: if t_stretchingtime != 0: - cvpj_s_stretch['time'] = {} - cvpj_s_stretch['time']['type'] = 'rate_timed' - cvpj_s_stretch['time']['data'] = {} - cvpj_s_stretch['time']['data']['rate'] = (t_stretchingtime/stretchbpm)*t_stretchingmultiplier - samplestretch[instrument] = (t_stretchingtime/stretchbpm)*t_stretchingmultiplier + cvpj_audiomod['stretch_method'] = 'rate_ignoretempo' + cvpj_audiomod['stretch_data'] = {} + cvpj_audiomod['stretch_data']['rate'] = (ald['dur_sec']/t_stretchingtime)/t_stretchingmultiplier + samplestretch[instrument] = (ald['dur_sec']/t_stretchingtime)*t_stretchingmultiplier elif t_stretchingtime == 0: - cvpj_s_stretch['time'] = {} - cvpj_s_stretch['time']['type'] = 'rate_nontimed' - cvpj_s_stretch['time']['data'] = {} - cvpj_s_stretch['time']['data']['rate'] = t_stretchingmultiplier + cvpj_audiomod['stretch_method'] = 'rate' + cvpj_audiomod['stretch_data'] = {} + cvpj_audiomod['stretch_data']['rate'] = 1/t_stretchingmultiplier samplestretch[instrument] = 1*t_stretchingmultiplier else: samplestretch[instrument] = 1 - print(cvpj_s_stretch) - cvpj_l_samples['FLSample' + str(instrument)] = cvpj_s_sample @@ -259,6 +253,7 @@ def parse(self, input_file, extra_param): if len(FL_Arrangements) != 0: FL_Arrangement = FL_Arrangements['0'] for item in FL_Arrangement['items']: + arrangementitemJ = {} arrangementitemJ['position'] = item['position']/ppq*4 arrangementitemJ['duration'] = item['length']/ppq*4 @@ -271,6 +266,7 @@ def parse(self, input_file, extra_param): cvpj_l_playlist[str(playlistline)]['placements_audio'] = [] if item['itemindex'] > item['patternbase']: + arrangementitemJ['fromindex'] = 'FLPat' + str(item['itemindex'] - item['patternbase']) cvpj_l_playlist[str(playlistline)]['placements_notes'].append(arrangementitemJ) if 'startoffset' in item or 'endoffset' in item: @@ -281,6 +277,7 @@ def parse(self, input_file, extra_param): else: + arrangementitemJ['fromindex'] = 'FLSample' + str(item['itemindex']) cvpj_l_playlist[str(playlistline)]['placements_audio'].append(arrangementitemJ) if 
'startoffset' in item or 'endoffset' in item: @@ -290,11 +287,12 @@ def parse(self, input_file, extra_param): pl_stretch = samplestretch[str(item['itemindex'])] if 'startoffset' in item: - placement_data.time_from_steps(arrangementitemJ['cut'], 'start', False, item['startoffset'], cvpj_l['bpm'], pl_stretch) + data_values.time_from_steps(arrangementitemJ['cut'], 'start', False, item['startoffset'], pl_stretch) if 'endoffset' in item: - placement_data.time_from_steps(arrangementitemJ['cut'], 'end', False, item['endoffset'], cvpj_l['bpm'], pl_stretch) + data_values.time_from_steps(arrangementitemJ['cut'], 'end', False, item['endoffset'], pl_stretch) + #print(arrangementitemJ) FL_Tracks = FL_Arrangement['tracks'] diff --git a/plugin_input/mi_jummbox.py b/plugin_input/mi_jummbox.py old mode 100755 new mode 100644 diff --git a/plugin_input/mi_notessimo_v2.py b/plugin_input/mi_notessimo_v2.py old mode 100755 new mode 100644 diff --git a/plugin_input/mi_notessimo_v3.py b/plugin_input/mi_notessimo_v3.py old mode 100755 new mode 100644 diff --git a/plugin_input/mi_pixitracker.py b/plugin_input/mi_pixitracker.py old mode 100755 new mode 100644 diff --git a/plugin_input/mi_tracker_it.py b/plugin_input/mi_tracker_it.py old mode 100755 new mode 100644 diff --git a/plugin_input/mi_tracker_mod.py b/plugin_input/mi_tracker_mod.py old mode 100755 new mode 100644 diff --git a/plugin_input/mi_tracker_s3m.py b/plugin_input/mi_tracker_s3m.py old mode 100755 new mode 100644 diff --git a/plugin_input/mi_tracker_umx.py b/plugin_input/mi_tracker_umx.py old mode 100755 new mode 100644 diff --git a/plugin_input/mi_tracker_xm.py b/plugin_input/mi_tracker_xm.py old mode 100755 new mode 100644 diff --git a/plugin_input/mi_trackerboy.py b/plugin_input/mi_trackerboy.py old mode 100755 new mode 100644 diff --git a/plugin_input/r_audiosauna.py b/plugin_input/r_audiosauna.py old mode 100755 new mode 100644 diff --git a/plugin_input/r_dawproject.py b/plugin_input/r_dawproject.py old mode 100755 new mode 100644 diff --git a/plugin_input/r_flipperzero.py b/plugin_input/r_flipperzero.py old mode 100755 new mode 100644 diff --git a/plugin_input/r_lmms.py b/plugin_input/r_lmms.py old mode 100755 new mode 100644 diff --git a/plugin_input/r_mekimeki.py b/plugin_input/r_mekimeki.py old mode 100755 new mode 100644 diff --git a/plugin_input/r_onlineseq.py b/plugin_input/r_onlineseq.py old mode 100755 new mode 100644 diff --git a/plugin_input/r_orgyana.py b/plugin_input/r_orgyana.py old mode 100755 new mode 100644 diff --git a/plugin_input/r_petaporon.py b/plugin_input/r_petaporon.py old mode 100755 new mode 100644 diff --git a/plugin_input/r_piyopiyo.py b/plugin_input/r_piyopiyo.py old mode 100755 new mode 100644 diff --git a/plugin_input/r_soundation.py b/plugin_input/r_soundation.py old mode 100755 new mode 100644 diff --git a/plugin_input/r_wavtool.py b/plugin_input/r_wavtool.py old mode 100755 new mode 100644 index c96163b7..545ba886 --- a/plugin_input/r_wavtool.py +++ b/plugin_input/r_wavtool.py @@ -7,7 +7,6 @@ from functions import folder_samples from functions import note_data from functions import tracks -from functions import placement_data import plugin_input import json @@ -79,9 +78,14 @@ def parse_clip_audio(j_wvtl_trackclip, j_wvtl_tracktype): cvpj_pldata["duration"] = j_wvtl_trc_timelineEnd*4 - j_wvtl_trc_timelineStart*4 cvpj_pldata['cut'] = {} cvpj_pldata['cut']['type'] = 'loop' - placement_data.time_from_steps(cvpj_pldata['cut'], 'start', True, j_wvtl_trc_readStart*4, j_wvtl_bpm, 1) - 
placement_data.time_from_steps(cvpj_pldata['cut'], 'loopstart', True, j_wvtl_trc_loopStart*4, j_wvtl_bpm, 1) - placement_data.time_from_steps(cvpj_pldata['cut'], 'loopend', True, j_wvtl_trc_loopEnd*4, j_wvtl_bpm, 1) + data_values.time_from_steps(cvpj_pldata['cut'], 'start', True, j_wvtl_trc_readStart*4, 1) + data_values.time_from_steps(cvpj_pldata['cut'], 'loopstart', True, j_wvtl_trc_loopStart*4, 1) + data_values.time_from_steps(cvpj_pldata['cut'], 'loopend', True, j_wvtl_trc_loopEnd*4, 1) + + cvpj_pldata['audiomod'] = {} + cvpj_pldata['audiomod']['stretch_algorithm'] = 'stretch' + cvpj_pldata['audiomod']['stretch_method'] = 'rate_ignoretempo' + cvpj_pldata['audiomod']['stretch_data'] = {'rate': 1} if 'audioBufferId' in j_wvtl_trackclip: audio_filename = extract_audio(j_wvtl_trackclip['audioBufferId']) diff --git a/plugin_input/ri_caustic.py b/plugin_input/ri_caustic.py old mode 100755 new mode 100644 diff --git a/plugin_input/ri_soundclub2.py b/plugin_input/ri_soundclub2.py old mode 100755 new mode 100644 diff --git a/plugin_output/__init__.py b/plugin_output/__init__.py old mode 100755 new mode 100644 diff --git a/plugin_output/ableton.py b/plugin_output/ableton.py old mode 100755 new mode 100644 index 4ca01673..3e790b4e --- a/plugin_output/ableton.py +++ b/plugin_output/ableton.py @@ -504,52 +504,51 @@ def create_clip(xmltag, cliptype, cvpj_placement, trackcolor): t_LoopEnd = None if 'audiomod' in cvpj_placement: - if 'stretch' in cvpj_placement['audiomod']: - cvpj_stretch = cvpj_placement['audiomod']['stretch'] - - if 'pitch' in cvpj_stretch: stretch_t_pitch = cvpj_stretch['pitch'] - - if 'enabled' in cvpj_stretch: - if cvpj_stretch['enabled'] == True: w_IsWarped = 'true' - - if w_IsWarped == 'true': - cvpj_isstretched = True - if 'params' in cvpj_stretch: - stretch_params = cvpj_stretch['params'] - if 'ComplexProEnvelope' in stretch_params: w_ComplexProEnvelope = stretch_params['ComplexProEnvelope'] - if 'ComplexProFormants' in stretch_params: w_ComplexProEnvelope = stretch_params['ComplexProFormants'] - if 'FluctuationTexture' in stretch_params: w_ComplexProEnvelope = stretch_params['FluctuationTexture'] - if 'GranularityTexture' in stretch_params: w_ComplexProEnvelope = stretch_params['GranularityTexture'] - if 'GranularityTones' in stretch_params: w_ComplexProEnvelope = stretch_params['GranularityTones'] - if 'TransientEnvelope' in stretch_params: w_ComplexProEnvelope = stretch_params['TransientEnvelope'] - if 'TransientLoopMode' in stretch_params: w_ComplexProEnvelope = stretch_params['TransientLoopMode'] - if 'TransientResolution' in stretch_params: w_ComplexProEnvelope = stretch_params['TransientResolution'] - - if 'mode' in cvpj_stretch: - if cvpj_stretch['mode'] == 'ableton_beats': w_WarpMode = 0 - if cvpj_stretch['mode'] == 'ableton_tones': w_WarpMode = 1 - if cvpj_stretch['mode'] == 'ableton_texture': w_WarpMode = 2 - if cvpj_stretch['mode'] == 'resample': w_WarpMode = 3 - if cvpj_stretch['mode'] == 'ableton_complex': w_WarpMode = 4 - if cvpj_stretch['mode'] == 'stretch_complexpro': w_WarpMode = 6 - cvpj_stretchmode = cvpj_stretch['mode'] - - if 'time' in cvpj_stretch: - if 'type' in cvpj_stretch['time'] and 'data' in cvpj_stretch['time']: - timedata = cvpj_stretch['time']['data'] - - if cvpj_stretch['time']['type'] == 'rate_timed': - speedrate = timedata['rate'] - rate_fixed = (1/speedrate)*AudioDuration - w_timemarkers = [{'pos': 0.0, 'pos_real': 0.0}, {'pos': normalspeed*8, 'pos_real': rate_fixed}] - - if cvpj_stretch['time']['type'] == 'rate_nontimed': - speedrate = 
timedata['rate'] - rate_fixed = (AudioDuration/speedrate) - if cvpj_stretch['mode'] == 'resample': rate_fixed *= pow(2, stretch_t_pitch/12) - w_timemarkers = [{'pos': 0.0, 'pos_real': 0.0}, {'pos': normalspeed*8, 'pos_real': rate_fixed}] - - if cvpj_stretch['mode'] == 'resample': cvpj_resample_pitch = pow(2, stretch_t_pitch/12) + cvpj_audiomod = cvpj_placement['audiomod'] + + if 'pitch' in cvpj_audiomod: stretch_t_pitch = cvpj_audiomod['pitch'] + if 'stretch_method' in cvpj_audiomod: + if cvpj_audiomod['stretch_method'] == 'warp': w_IsWarped = 'true' + + if w_IsWarped == 'true': + if 'stretch_params' in cvpj_audiomod: + stretch_params = cvpj_audiomod['stretch_params'] + if 'ComplexProEnvelope' in stretch_params: w_ComplexProEnvelope = stretch_params['ComplexProEnvelope'] + if 'ComplexProFormants' in stretch_params: w_ComplexProFormants = stretch_params['ComplexProFormants'] + if 'FluctuationTexture' in stretch_params: w_FluctuationTexture = stretch_params['FluctuationTexture'] + if 'GranularityTexture' in stretch_params: w_GranularityTexture = stretch_params['GranularityTexture'] + if 'GranularityTones' in stretch_params: w_GranularityTones = stretch_params['GranularityTones'] + if 'TransientEnvelope' in stretch_params: w_TransientEnvelope = stretch_params['TransientEnvelope'] + if 'TransientLoopMode' in stretch_params: w_TransientLoopMode = stretch_params['TransientLoopMode'] + if 'TransientResolution' in stretch_params: w_TransientResolution = stretch_params['TransientResolution'] + + if 'stretch_algorithm' in cvpj_audiomod: + if cvpj_audiomod['stretch_algorithm'] == 'ableton_beats': w_WarpMode = 0 + if cvpj_audiomod['stretch_algorithm'] == 'ableton_tones': w_WarpMode = 1 + if cvpj_audiomod['stretch_algorithm'] == 'ableton_texture': w_WarpMode = 2 + if cvpj_audiomod['stretch_algorithm'] == 'resample': w_WarpMode = 3 + if cvpj_audiomod['stretch_algorithm'] == 'ableton_complex': w_WarpMode = 4 + if cvpj_audiomod['stretch_algorithm'] == 'stretch_complexpro': w_WarpMode = 6 + + if 'stretch_data' in cvpj_audiomod: + w_timemarkers = cvpj_audiomod['stretch_data'] + + #if 'time' in cvpj_stretch: + # if 'type' in cvpj_stretch['time'] and 'data' in cvpj_stretch['time']: + # timedata = cvpj_stretch['time']['data'] + + # if cvpj_stretch['time']['type'] == 'rate_timed': + # speedrate = timedata['rate'] + # rate_fixed = (1/speedrate)*AudioDuration + # w_timemarkers = [{'pos': 0.0, 'pos_real': 0.0}, {'pos': normalspeed*8, 'pos_real': rate_fixed}] + + # if cvpj_stretch['time']['type'] == 'rate_nontimed': + # speedrate = timedata['rate'] + # rate_fixed = (AudioDuration/speedrate) + # if cvpj_stretch['mode'] == 'resample': rate_fixed *= pow(2, stretch_t_pitch/12) + # w_timemarkers = [{'pos': 0.0, 'pos_real': 0.0}, {'pos': normalspeed*8, 'pos_real': rate_fixed}] + + #if cvpj_stretch['mode'] == 'resample': cvpj_resample_pitch = pow(2, stretch_t_pitch/12) #((t_CurrentEnd)*tempomul)/speedrate @@ -559,7 +558,7 @@ def create_clip(xmltag, cliptype, cvpj_placement, trackcolor): if 'type' in cvpj_placement_cut: - if cvpj_isstretched == True: + if w_IsWarped == 'true': if cvpj_placement_cut['type'] == 'cut': if 'start' in cvpj_placement_cut: t_LoopStart = cvpj_placement_cut['start']/4 if 'end' in cvpj_placement_cut: t_LoopEnd = cvpj_placement_cut['end']/4 @@ -569,22 +568,10 @@ def create_clip(xmltag, cliptype, cvpj_placement, trackcolor): if 'loopstart' in cvpj_placement_cut: t_LoopStart = cvpj_placement_cut['loopstart']/4 if 'loopend' in cvpj_placement_cut: t_LoopEnd = cvpj_placement_cut['loopend']/4 - else: if 
cvpj_placement_cut['type'] == 'cut': if 'start' in cvpj_placement_cut: t_LoopStart = cvpj_placement_cut['start_real'] if 'end' in cvpj_placement_cut: t_LoopEnd = cvpj_placement_cut['end_real'] - if cvpj_placement_cut['type'] == 'loop': - t_LoopOn = 'true' - w_IsWarped = 'true' - cvpj_placement['audiomod'] = {} - cvpj_placement['audiomod']['stretch'] = {} - cvpj_placement['audiomod']['stretch']['enabled'] = True - cvpj_placement['audiomod']['stretch']['time'] = {'type': 'rate_timed', 'data': {'rate': 1}} - - if 'start' in cvpj_placement_cut: t_StartRelative = cvpj_placement_cut['start']/4 - if 'loopstart' in cvpj_placement_cut: t_LoopStart = cvpj_placement_cut['loopstart']/4 - if 'loopend' in cvpj_placement_cut: t_LoopEnd = cvpj_placement_cut['loopend']/4 if t_LoopEnd == None: if w_IsWarped == 'true': @@ -635,6 +622,10 @@ def create_clip(xmltag, cliptype, cvpj_placement, trackcolor): addvalue(x_ClipData_loop, 'HiddenLoopStart', t_LoopStart) addvalue(x_ClipData_loop, 'HiddenLoopEnd', t_LoopEnd) + #for value in [t_CurrentStart, t_CurrentEnd, t_StartRelative, t_LoopStart, t_LoopEnd]: + # print(str(value).ljust(20), end=' ') + #print() + addvalue(x_ClipData, 'Name', t_name) addvalue(x_ClipData, 'Annotation', '') addvalue(x_ClipData, 'Color', str(t_color)) @@ -1046,13 +1037,10 @@ def getshortname(self): return 'ableton' def gettype(self): return 'r' def getdawcapabilities(self): return { - 'fxrack': False, - 'track_lanes': False, 'placement_cut': True, 'placement_loop': True, - 'track_nopl': False, 'auto_nopl': True, - 'placement_audio_events': False + 'placement_audio_stretch': ['warp'] } def getsupportedplugins(self): return ['sampler', 'sampler-multi', 'sampler-slicer', 'vst2', 'vst3'] def parse(self, convproj_json, output_file): diff --git a/plugin_output/cvpj.py b/plugin_output/cvpj.py old mode 100755 new mode 100644 diff --git a/plugin_output/cvpj_m.py b/plugin_output/cvpj_m.py old mode 100755 new mode 100644 diff --git a/plugin_output/cvpj_mi.py b/plugin_output/cvpj_mi.py old mode 100755 new mode 100644 diff --git a/plugin_output/daw_flp.py b/plugin_output/daw_flp.py old mode 100755 new mode 100644 index 06c236b1..b16b8f81 --- a/plugin_output/daw_flp.py +++ b/plugin_output/daw_flp.py @@ -31,9 +31,7 @@ def getdawcapabilities(self): 'fxrack': True, 'track_lanes': True, 'placement_cut': True, - 'placement_loop': False, - 'auto_nopl': False, - 'track_nopl': False + 'placement_audio_stretch': ['rate', 'rate_ignoretempo'] } def getsupportedplugins(self): return ['sampler', 'vst2', 'vst3'] def parse(self, convproj_json, output_file): @@ -120,8 +118,6 @@ def parse(self, convproj_json, output_file): if 'bpm' in projJ: FL_Main['Tempo'] = projJ['bpm'] if 'pitch' in projJ: FL_Main['MainPitch'] = struct.unpack('H', struct.pack('h', int(projJ['pitch'])))[0] - tempomul = projJ['bpm']/120 - samples_id = {} inst_id = {} inst_id_count = 0 @@ -201,34 +197,35 @@ def parse(self, convproj_json, output_file): audiorate = 1 if 'audiomod' in CVPJ_Data: - if 'stretch' in CVPJ_Data['audiomod']: - cvpj_stretchdata = CVPJ_Data['audiomod']['stretch'] - if 'enabled' in cvpj_stretchdata: - if cvpj_stretchdata['enabled'] == True: - if 'mode' in cvpj_stretchdata: - if cvpj_stretchdata['mode'] == 'resample': T_Main['stretchingmode'] = 0 - elif cvpj_stretchdata['mode'] == 'elastique_v3': T_Main['stretchingmode'] = 1 - elif cvpj_stretchdata['mode'] == 'elastique_v3_mono': T_Main['stretchingmode'] = 2 - elif cvpj_stretchdata['mode'] == 'slice_stretch': T_Main['stretchingmode'] = 3 - elif cvpj_stretchdata['mode'] == 'auto': 
T_Main['stretchingmode'] = 4 - elif cvpj_stretchdata['mode'] == 'slice_map': T_Main['stretchingmode'] = 5 - elif cvpj_stretchdata['mode'] == 'elastique_v2': T_Main['stretchingmode'] = 6 - elif cvpj_stretchdata['mode'] == 'elastique_v2_transient': T_Main['stretchingmode'] = 7 - elif cvpj_stretchdata['mode'] == 'elastique_v2_mono': T_Main['stretchingmode'] = 8 - elif cvpj_stretchdata['mode'] == 'elastique_v2_speech': T_Main['stretchingmode'] = 9 - else: T_Main['stretchingmode'] = -1 - else: T_Main['stretchingmode'] = -1 - - if 'pitch' in cvpj_stretchdata: T_Main['stretchingpitch'] = int(cvpj_stretchdata['pitch']*100) - if 'time' in cvpj_stretchdata: - timedata = cvpj_stretchdata['time'] - if timedata['type'] == 'rate_timed': - audiorate = timedata['data']['rate'] - T_Main['stretchingtime'] = int( ((timedata['data']['rate']*384)*audioinfo['dur_sec'])*tempomul ) - if timedata['type'] == 'rate_nontimed': - audiorate = timedata['data']['rate'] - - T_Main['stretchingmultiplier'] = int( math.log2(timedata['data']['rate'])*10000 ) + + cvpj_audiomod = CVPJ_Data['audiomod'] + + if 'stretch_algorithm' in cvpj_audiomod: + if cvpj_audiomod['stretch_algorithm'] == 'resample': T_Main['stretchingmode'] = 0 + elif cvpj_audiomod['stretch_algorithm'] == 'elastique_v3': T_Main['stretchingmode'] = 1 + elif cvpj_audiomod['stretch_algorithm'] == 'elastique_v3_mono': T_Main['stretchingmode'] = 2 + elif cvpj_audiomod['stretch_algorithm'] == 'slice_stretch': T_Main['stretchingmode'] = 3 + elif cvpj_audiomod['stretch_algorithm'] == 'auto': T_Main['stretchingmode'] = 4 + elif cvpj_audiomod['stretch_algorithm'] == 'slice_map': T_Main['stretchingmode'] = 5 + elif cvpj_audiomod['stretch_algorithm'] == 'elastique_v2': T_Main['stretchingmode'] = 6 + elif cvpj_audiomod['stretch_algorithm'] == 'elastique_v2_transient': T_Main['stretchingmode'] = 7 + elif cvpj_audiomod['stretch_algorithm'] == 'elastique_v2_mono': T_Main['stretchingmode'] = 8 + elif cvpj_audiomod['stretch_algorithm'] == 'elastique_v2_speech': T_Main['stretchingmode'] = 9 + else: T_Main['stretchingmode'] = -1 + else: T_Main['stretchingmode'] = -1 + + if 'pitch' in cvpj_audiomod: T_Main['stretchingpitch'] = int(cvpj_audiomod['pitch']*100) + + if 'stretch_data' in cvpj_audiomod: + timedata = cvpj_audiomod['stretch_data'] + + if cvpj_audiomod['stretch_method'] == 'rate_ignoretempo': + audiorate = timedata['rate'] + T_Main['stretchingtime'] = int( (audioinfo['dur_sec']*384)/audiorate ) + + if cvpj_audiomod['stretch_method'] == 'rate': + audiorate = timedata['rate'] + T_Main['stretchingmultiplier'] = int( math.log2(1/audiorate)*10000 ) samplestretch[CVPJ_Entry] = audiorate @@ -335,6 +332,7 @@ def parse(self, convproj_json, output_file): if 'cut' in CVPJ_Placement: if 'type' in CVPJ_Placement['cut']: if CVPJ_Placement['cut']['type'] == 'cut': + if 'start' in CVPJ_Placement['cut']: FL_playlistitem['startoffset'] = int((CVPJ_Placement['cut']['start']*ppq)/4) if 'end' in CVPJ_Placement['cut']: FL_playlistitem['endoffset'] = int((CVPJ_Placement['cut']['end']*ppq)/4) if FL_playlistitem['position'] not in FL_Playlist_BeforeSort: @@ -348,18 +346,13 @@ def parse(self, convproj_json, output_file): s_str = samplestretch[CVPJ_Placement['fromindex']] - sampleid = samples_id[CVPJ_Placement['fromindex']] FL_playlistitem = {} FL_playlistitem['position'] = int((CVPJ_Placement['position']*ppq)/4) - #print(int((CVPJ_Placement['position']*ppq)/4)) FL_playlistitem['patternbase'] = 20480 FL_playlistitem['itemindex'] = sampleid - if 'duration' in CVPJ_Placement: - 
FL_playlistitem['length'] = int((CVPJ_Placement['duration']*ppq)/4) - if CVPJ_Placement != 0: - FL_playlistitem['startoffset'] = 0 - FL_playlistitem['endoffset'] = CVPJ_Placement['duration']/s_str + FL_playlistitem['length'] = int((CVPJ_Placement['duration']*ppq)/4) + FL_playlistitem['unknown1'] = 120 FL_playlistitem['unknown2'] = 25664 FL_playlistitem['unknown3'] = 32896 @@ -369,19 +362,18 @@ def parse(self, convproj_json, output_file): if CVPJ_Placement['muted'] == True: FL_playlistitem['flags'] = 12352 - if 'cut' in CVPJ_Placement: if 'type' in CVPJ_Placement['cut']: - if CVPJ_Placement['cut']['type'] == 'cut': - if 'start_nonstretch' in CVPJ_Placement['cut']: - FL_playlistitem['startoffset'] = CVPJ_Placement['cut']['start_nonstretch'] + cutdata = CVPJ_Placement['cut'] + + if cutdata['type'] == 'cut': - if 'end_nonstretch' in CVPJ_Placement['cut']: - FL_playlistitem['endoffset'] = CVPJ_Placement['cut']['end_nonstretch'] + if 'start_nonstretch' in cutdata: + FL_playlistitem['startoffset'] = cutdata['start_nonstretch'] - FL_playlistitem['endoffset'] = CVPJ_Placement['cut']['end_nonstretch'] + if 'end_nonstretch' in cutdata: + FL_playlistitem['endoffset'] = cutdata['end_nonstretch'] - if FL_playlistitem['position'] not in FL_Playlist_BeforeSort: FL_Playlist_BeforeSort[FL_playlistitem['position']] = [] FL_Playlist_BeforeSort[FL_playlistitem['position']].append(FL_playlistitem) diff --git a/plugin_output/daw_lmms.py b/plugin_output/daw_lmms.py old mode 100755 new mode 100644 diff --git a/plugin_output/dawproject.py b/plugin_output/dawproject.py old mode 100755 new mode 100644 diff --git a/plugin_output/midi.py b/plugin_output/midi.py old mode 100755 new mode 100644 diff --git a/plugin_output/muse.py b/plugin_output/muse.py old mode 100755 new mode 100644 diff --git a/plugin_output/reaper.py b/plugin_output/reaper.py old mode 100755 new mode 100644 diff --git a/requirements.txt b/requirements.txt old mode 100755 new mode 100644 index 85fbbc8c..5a05a7b6 --- a/requirements.txt +++ b/requirements.txt @@ -7,4 +7,5 @@ av beautifulsoup4 blackboxprotobuf pypng -git+https://github.com/Perlence/rpp \ No newline at end of file +git+https://github.com/Perlence/rpp +tinydb \ No newline at end of file
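Editor's note: the mi_flp.py and daw_flp.py hunks above express the new stretch methods through a pair of simple conversions. The sketch below is not part of the patch; the helper names and the dur_sec argument are illustrative only, and it merely restates the formulas visible in those hunks as a self-contained round trip between a cvpj 'rate' / 'rate_ignoretempo' value and FL Studio's stretchingtime / stretchingmultiplier channel fields.

import math

def flp_fields_from_rate(method, rate, dur_sec):
    # cvpj audiomod -> FL Studio sampler fields (per plugin_output/daw_flp.py)
    if method == 'rate_ignoretempo':
        # the patch converts between seconds and this field with a factor of 384
        return {'stretchingtime': int((dur_sec * 384) / rate)}
    if method == 'rate':
        # encoded as log2(1/rate) * 10000, per the output hunk
        return {'stretchingmultiplier': int(math.log2(1 / rate) * 10000)}
    return {}

def rate_from_flp_fields(fields, dur_sec):
    # FL Studio sampler fields -> cvpj audiomod (per plugin_input/mi_flp.py)
    multiplier = pow(2, fields.get('stretchingmultiplier', 0) / 10000)
    if fields.get('stretchingtime', 0) != 0:
        stretch_sec = fields['stretchingtime'] / 384
        return 'rate_ignoretempo', (dur_sec / stretch_sec) / multiplier
    return 'rate', 1 / multiplier

# Round trip at a 2x speed-up on a 4-second sample
print(flp_fields_from_rate('rate_ignoretempo', 2.0, 4.0))   # {'stretchingtime': 768}
print(rate_from_flp_fields({'stretchingtime': 768}, 4.0))   # ('rate_ignoretempo', 2.0)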