diff --git a/figures/fig_invariance_field.pdf b/figures/fig_invariance_field.pdf new file mode 100644 index 0000000..c76038f Binary files /dev/null and b/figures/fig_invariance_field.pdf differ diff --git a/figures/fig_invariance_full_Chorthippus_biguttulus.pdf b/figures/fig_invariance_full_Chorthippus_biguttulus.pdf index d038bec..4e7bab4 100644 Binary files a/figures/fig_invariance_full_Chorthippus_biguttulus.pdf and b/figures/fig_invariance_full_Chorthippus_biguttulus.pdf differ diff --git a/figures/fig_invariance_full_Omocestus_rufipes.pdf b/figures/fig_invariance_full_Omocestus_rufipes.pdf index 27a99d9..6b3d4bd 100644 Binary files a/figures/fig_invariance_full_Omocestus_rufipes.pdf and b/figures/fig_invariance_full_Omocestus_rufipes.pdf differ diff --git a/figures/fig_invariance_log-hp_appendix.pdf b/figures/fig_invariance_log-hp_appendix.pdf index 4469685..e5d2397 100644 Binary files a/figures/fig_invariance_log-hp_appendix.pdf and b/figures/fig_invariance_log-hp_appendix.pdf differ diff --git a/figures/fig_invariance_log_hp.pdf b/figures/fig_invariance_log_hp.pdf index 90447ec..ac11b58 100644 Binary files a/figures/fig_invariance_log_hp.pdf and b/figures/fig_invariance_log_hp.pdf differ diff --git a/figures/fig_invariance_short_Omocestus_rufipes.pdf b/figures/fig_invariance_short_Omocestus_rufipes.pdf index fd7f41e..71aa8ff 100644 Binary files a/figures/fig_invariance_short_Omocestus_rufipes.pdf and b/figures/fig_invariance_short_Omocestus_rufipes.pdf differ diff --git a/figures/fig_invariance_thresh-lp_appendix.pdf b/figures/fig_invariance_thresh-lp_noise_appendix.pdf similarity index 99% rename from figures/fig_invariance_thresh-lp_appendix.pdf rename to figures/fig_invariance_thresh-lp_noise_appendix.pdf index 1a45ae1..9b1a6b0 100644 Binary files a/figures/fig_invariance_thresh-lp_appendix.pdf and b/figures/fig_invariance_thresh-lp_noise_appendix.pdf differ diff --git a/figures/fig_invariance_thresh-lp_pure_appendix.pdf 
b/figures/fig_invariance_thresh-lp_pure_appendix.pdf new file mode 100644 index 0000000..3638c94 Binary files /dev/null and b/figures/fig_invariance_thresh-lp_pure_appendix.pdf differ diff --git a/figures/fig_invariance_thresh_lp_species.pdf b/figures/fig_invariance_thresh_lp_species.pdf index 53f61e6..6f9f8fa 100644 Binary files a/figures/fig_invariance_thresh_lp_species.pdf and b/figures/fig_invariance_thresh_lp_species.pdf differ diff --git a/figures/fig_kernel_sd_perc_field_appendix.pdf b/figures/fig_kernel_sd_perc_field_appendix.pdf new file mode 100644 index 0000000..23e3789 Binary files /dev/null and b/figures/fig_kernel_sd_perc_field_appendix.pdf differ diff --git a/figures/fig_kernel_sd_perc_full_appendix.pdf b/figures/fig_kernel_sd_perc_full_appendix.pdf new file mode 100644 index 0000000..a4f5128 Binary files /dev/null and b/figures/fig_kernel_sd_perc_full_appendix.pdf differ diff --git a/figures/fig_kernel_sd_perc_short_appendix.pdf b/figures/fig_kernel_sd_perc_short_appendix.pdf new file mode 100644 index 0000000..e8d4cd1 Binary files /dev/null and b/figures/fig_kernel_sd_perc_short_appendix.pdf differ diff --git a/figures/fig_kernel_sd_perc_thresh_lp_appendix.pdf b/figures/fig_kernel_sd_perc_thresh_lp_appendix.pdf new file mode 100644 index 0000000..c4aa55c Binary files /dev/null and b/figures/fig_kernel_sd_perc_thresh_lp_appendix.pdf differ diff --git a/main.aux b/main.aux index 77866ef..31f64b7 100644 --- a/main.aux +++ b/main.aux @@ -256,25 +256,29 @@ \@writefile{lof}{\contentsline {figure}{\numberline {6}{\ignorespaces \textbf {Feature representation of different species-specific songs saturates at different points in feature space.} }}{15}{}\protected@file@percent } \newlabel{fig:inv_thresh-lp_species}{{6}{15}{}{}{}} \@writefile{lof}{\contentsline {figure}{\numberline {7}{\ignorespaces \textbf {Step-wise emergence of intensity invariant song representation along the model pathway.} }}{16}{}\protected@file@percent } 
-\newlabel{fig:inv_thresh-lp_full}{{7}{16}{}{}{}} +\newlabel{fig:inv_full}{{7}{16}{}{}{}} \@writefile{lof}{\contentsline {figure}{\numberline {8}{\ignorespaces \textbf {Step-wise emergence of intensity invariant song representation along the model pathway.} }}{17}{}\protected@file@percent } -\newlabel{fig:inv_thresh-lp_short}{{8}{17}{}{}{}} -\newlabel{eq:pdf_split}{{15}{18}{}{}{}} -\newlabel{eq:feat_avg}{{16}{18}{}{}{}} -\newlabel{eq:feat_prop}{{17}{18}{}{}{}} +\newlabel{fig:inv_short}{{8}{17}{}{}{}} +\@writefile{lof}{\contentsline {figure}{\numberline {9}{\ignorespaces \textbf {Step-wise emergence of intensity invariant song representation along the model pathway.} }}{18}{}\protected@file@percent } +\newlabel{fig:inv_field}{{9}{18}{}{}{}} +\newlabel{eq:pdf_split}{{15}{19}{}{}{}} +\newlabel{eq:feat_avg}{{16}{19}{}{}{}} +\newlabel{eq:feat_prop}{{17}{19}{}{}{}} \abx@aux@cite{0}{stumpner1991auditory} \abx@aux@segm{0}{0}{stumpner1991auditory} -\@writefile{toc}{\contentsline {section}{\numberline {4}Discriminating species-specific song\\patterns in feature space}{19}{}\protected@file@percent } -\@writefile{toc}{\contentsline {section}{\numberline {5}Conclusions \& outlook}{19}{}\protected@file@percent } -\abx@aux@page{73}{19} -\@writefile{lof}{\contentsline {figure}{\numberline {9}{\ignorespaces \textbf {} }}{21}{}\protected@file@percent } -\newlabel{}{{9}{21}{}{}{}} -\@writefile{lof}{\contentsline {figure}{\numberline {10}{\ignorespaces \textbf {} }}{21}{}\protected@file@percent } -\newlabel{}{{10}{21}{}{}{}} +\@writefile{toc}{\contentsline {section}{\numberline {4}Discriminating species-specific song\\patterns in feature space}{20}{}\protected@file@percent } +\@writefile{toc}{\contentsline {section}{\numberline {5}Conclusions \& outlook}{20}{}\protected@file@percent } +\abx@aux@page{73}{20} +\@writefile{lof}{\contentsline {figure}{\numberline {10}{\ignorespaces \textbf {} }}{22}{}\protected@file@percent } +\newlabel{}{{10}{22}{}{}{}} \@writefile{lof}{\contentsline 
{figure}{\numberline {11}{\ignorespaces \textbf {} }}{22}{}\protected@file@percent } \newlabel{}{{11}{22}{}{}{}} -\@writefile{lof}{\contentsline {figure}{\numberline {12}{\ignorespaces \textbf {} }}{22}{}\protected@file@percent } -\newlabel{}{{12}{22}{}{}{}} +\@writefile{lof}{\contentsline {figure}{\numberline {12}{\ignorespaces \textbf {} }}{23}{}\protected@file@percent } +\newlabel{}{{12}{23}{}{}{}} +\@writefile{lof}{\contentsline {figure}{\numberline {13}{\ignorespaces \textbf {} }}{23}{}\protected@file@percent } +\newlabel{}{{13}{23}{}{}{}} +\@writefile{lof}{\contentsline {figure}{\numberline {14}{\ignorespaces \textbf {} }}{24}{}\protected@file@percent } +\newlabel{}{{14}{24}{}{}{}} \gdef\svg@ink@ver@settings{{\m@ne }{inkscape}{\m@ne }} \abx@aux@read@bbl@mdfivesum{1380DC8C93D2855FDB132CC5A40AD52F} -\gdef \@abspage@last{22} +\gdef \@abspage@last{24} diff --git a/main.blg b/main.blg index 49299cf..8f65056 100644 --- a/main.blg +++ b/main.blg @@ -1,71 +1,71 @@ [0] Config.pm:307> INFO - This is Biber 2.19 [0] Config.pm:310> INFO - Logfile is 'main.blg' -[37] biber:340> INFO - === Mo Apr 20, 2026, 17:47:44 -[45] Biber.pm:419> INFO - Reading 'main.bcf' -[74] Biber.pm:979> INFO - Found 55 citekeys in bib section 0 -[80] Biber.pm:4419> INFO - Processing section 0 -[85] Biber.pm:4610> INFO - Looking for bibtex file 'cite.bib' for section 0 -[86] bibtex.pm:1713> INFO - LaTeX decoding ... 
-[117] bibtex.pm:1519> INFO - Found BibTeX data source 'cite.bib' -[303] UCollate.pm:68> INFO - Overriding locale 'en-US' defaults 'variable = shifted' with 'variable = non-ignorable' -[303] UCollate.pm:68> INFO - Overriding locale 'en-US' defaults 'normalization = NFD' with 'normalization = prenormalized' -[303] Biber.pm:4239> INFO - Sorting list 'nyt/global//global/global' of type 'entry' with template 'nyt' and locale 'en-US' -[303] Biber.pm:4245> INFO - No sort tailoring available for locale 'en-US' -[328] bbl.pm:660> INFO - Writing 'main.bbl' with encoding 'UTF-8' -[339] bbl.pm:763> INFO - Output to main.bbl -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 10, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 21, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 38, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 49, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 58, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 73, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 82, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 91, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX 
subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 100, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 109, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 118, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 127, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 136, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 157, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 178, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 187, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 196, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 207, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 218, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 229, warning: 6 characters of junk seen at toplevel -[339] 
Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 240, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 249, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 258, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 269, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 278, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 289, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 300, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 309, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 328, warning: 6 characters of junk seen at toplevel -[339] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 337, warning: 6 characters of junk seen at toplevel -[340] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 400, warning: 6 characters of junk seen at toplevel -[340] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 419, warning: 6 characters of 
junk seen at toplevel -[340] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 428, warning: 6 characters of junk seen at toplevel -[340] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 437, warning: 6 characters of junk seen at toplevel -[340] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 456, warning: 6 characters of junk seen at toplevel -[340] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 491, warning: 6 characters of junk seen at toplevel -[340] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 526, warning: 6 characters of junk seen at toplevel -[340] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 535, warning: 6 characters of junk seen at toplevel -[340] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 556, warning: 6 characters of junk seen at toplevel -[340] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 565, warning: 6 characters of junk seen at toplevel -[340] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 576, warning: 6 characters of junk seen at toplevel -[340] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 587, warning: 6 characters of junk seen at toplevel -[340] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 619, warning: 6 characters of junk seen at toplevel -[340] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 
648, warning: 6 characters of junk seen at toplevel -[340] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 658, warning: 6 characters of junk seen at toplevel -[340] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 667, warning: 6 characters of junk seen at toplevel -[340] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 688, warning: 6 characters of junk seen at toplevel -[340] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 709, warning: 6 characters of junk seen at toplevel -[340] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 720, warning: 6 characters of junk seen at toplevel -[340] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 729, warning: 6 characters of junk seen at toplevel -[340] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 749, warning: 6 characters of junk seen at toplevel -[340] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 766, warning: 6 characters of junk seen at toplevel -[340] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 775, warning: 6 characters of junk seen at toplevel -[340] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 800, warning: 6 characters of junk seen at toplevel -[340] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_kNLL/347c261ec4135a5723bef5c751f5078f_287142.utf8, line 817, warning: 6 characters of junk seen at toplevel -[340] Biber.pm:133> INFO - WARNINGS: 55 +[39] biber:340> INFO - === Do Apr 23, 2026, 
12:30:17 +[50] Biber.pm:419> INFO - Reading 'main.bcf' +[81] Biber.pm:979> INFO - Found 55 citekeys in bib section 0 +[87] Biber.pm:4419> INFO - Processing section 0 +[93] Biber.pm:4610> INFO - Looking for bibtex file 'cite.bib' for section 0 +[95] bibtex.pm:1713> INFO - LaTeX decoding ... +[126] bibtex.pm:1519> INFO - Found BibTeX data source 'cite.bib' +[323] UCollate.pm:68> INFO - Overriding locale 'en-US' defaults 'variable = shifted' with 'variable = non-ignorable' +[323] UCollate.pm:68> INFO - Overriding locale 'en-US' defaults 'normalization = NFD' with 'normalization = prenormalized' +[323] Biber.pm:4239> INFO - Sorting list 'nyt/global//global/global' of type 'entry' with template 'nyt' and locale 'en-US' +[323] Biber.pm:4245> INFO - No sort tailoring available for locale 'en-US' +[348] bbl.pm:660> INFO - Writing 'main.bbl' with encoding 'UTF-8' +[359] bbl.pm:763> INFO - Output to main.bbl +[359] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 10, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 21, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 38, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 49, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 58, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 73, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - BibTeX subsystem: 
/tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 82, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 91, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 100, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 109, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 118, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 127, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 136, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 157, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 178, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 187, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 196, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 207, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - 
BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 218, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 229, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 240, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 249, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 258, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 269, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 278, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 289, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 300, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 309, warning: 6 characters of junk seen at toplevel +[359] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 328, warning: 6 characters of junk seen at toplevel +[360] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 337, warning: 6 characters of junk seen at toplevel +[360] 
Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 400, warning: 6 characters of junk seen at toplevel +[360] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 419, warning: 6 characters of junk seen at toplevel +[360] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 428, warning: 6 characters of junk seen at toplevel +[360] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 437, warning: 6 characters of junk seen at toplevel +[360] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 456, warning: 6 characters of junk seen at toplevel +[360] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 491, warning: 6 characters of junk seen at toplevel +[360] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 526, warning: 6 characters of junk seen at toplevel +[360] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 535, warning: 6 characters of junk seen at toplevel +[360] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 556, warning: 6 characters of junk seen at toplevel +[360] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 565, warning: 6 characters of junk seen at toplevel +[360] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 576, warning: 6 characters of junk seen at toplevel +[360] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 587, warning: 6 characters of junk seen at 
toplevel +[360] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 619, warning: 6 characters of junk seen at toplevel +[360] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 648, warning: 6 characters of junk seen at toplevel +[360] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 658, warning: 6 characters of junk seen at toplevel +[360] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 667, warning: 6 characters of junk seen at toplevel +[360] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 688, warning: 6 characters of junk seen at toplevel +[360] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 709, warning: 6 characters of junk seen at toplevel +[360] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 720, warning: 6 characters of junk seen at toplevel +[360] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 729, warning: 6 characters of junk seen at toplevel +[360] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 749, warning: 6 characters of junk seen at toplevel +[360] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 766, warning: 6 characters of junk seen at toplevel +[360] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 775, warning: 6 characters of junk seen at toplevel +[360] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 800, warning: 6 characters 
of junk seen at toplevel +[360] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_UrtA/347c261ec4135a5723bef5c751f5078f_61557.utf8, line 817, warning: 6 characters of junk seen at toplevel +[360] Biber.pm:133> INFO - WARNINGS: 55 diff --git a/main.fdb_latexmk b/main.fdb_latexmk index 1d186ad..b0fcf25 100644 --- a/main.fdb_latexmk +++ b/main.fdb_latexmk @@ -1,14 +1,14 @@ # Fdb version 4 -["biber main"] 1776700063.81436 "main.bcf" "main.bbl" "main" 1776785447.82764 0 +["biber main"] 1776940217.48712 "main.bcf" "main.bbl" "main" 1777031926.86359 0 "cite.bib" 1770904753.08918 27483 4290db0c91f7b5055e25472ef913f6b4 "" - "main.bcf" 1776785447.75713 112931 2a478116d80ebb1ada7083a24facd6e3 "pdflatex" + "main.bcf" 1777031926.78659 112931 2a478116d80ebb1ada7083a24facd6e3 "pdflatex" (generated) "main.bbl" "main.blg" (rewritten before read) -["pdflatex"] 1776785446.78385 "/home/hartling/phd/paper/paper_2025/main.tex" "main.pdf" "main" 1776785447.82786 0 +["pdflatex"] 1777031925.73488 "/home/hartling/phd/paper/paper_2025/main.tex" "main.pdf" "main" 1777031926.8638 0 "/etc/texmf/web2c/texmf.cnf" 1761560044.43676 475 c0e671620eb5563b2130f56340a5fde8 "" - "/home/hartling/phd/paper/paper_2025/main.tex" 1776761684.33358 49568 1208d661b3e7714265686ed03fb2017c "" + "/home/hartling/phd/paper/paper_2025/main.tex" 1777031925.5956 50083 fda38e01bf1bcfd1210581259b2830ea "" "/usr/share/texlive/texmf-dist/fonts/map/fontname/texfonts.map" 1577235249 3524 cb3e574dea2d1052e39280babc910dc8 "" "/usr/share/texlive/texmf-dist/fonts/tfm/public/amsfonts/cmextra/cmex7.tfm" 1246382020 1004 54797486969f23fa377b128694d548df "" "/usr/share/texlive/texmf-dist/fonts/tfm/public/amsfonts/cmextra/cmex8.tfm" 1246382020 988 bdf658c3bfc2d96d3c8b02cfc1c94c20 "" @@ -153,20 +153,22 @@ "/var/lib/texmf/web2c/pdftex/pdflatex.fmt" 1761648508 8213325 7fd20752ab46ff9aa583e4973d7433df "" "figures/fig_auditory_pathway.pdf" 1771593904.14638 1153923 3df8539421fd21dc866cc8d320bd9b1d "" "figures/fig_feat_stages.pdf" 
1774002994.98767 11091006 565fe951f1255c121429a060082398f5 "" - "figures/fig_invariance_full_Omocestus_rufipes.pdf" 1776762166.51645 3001639 ae47041e28cb66a93266e5c9ad6ed28f "" + "figures/fig_invariance_field.pdf" 1776952657.04263 9131898 e9d9acff1d03fdf60ddc9e32b87ae6c2 "" + "figures/fig_invariance_full_Omocestus_rufipes.pdf" 1776954098.39044 13431867 08ff6482ff81d2878ddba2a90d032d94 "" "figures/fig_invariance_log-hp_appendix.pdf" 1776779295.50028 534259 6995c3131a22abb1b9aae17e18bc68df "" "figures/fig_invariance_log_hp.pdf" 1776779076.07106 838498 414c5b733797773bd8f0bade77b06814 "" - "figures/fig_invariance_short_Omocestus_rufipes.pdf" 1776762283.97619 4889415 f7cc31a0e7bce520edaad1254f3c5f5c "" - "figures/fig_invariance_thresh-lp_appendix.pdf" 1776785227.64951 1484986 8922cee94bad1651998c57f4e2fb9a21 "" + "figures/fig_invariance_short_Omocestus_rufipes.pdf" 1776954155.39396 5547735 148c794495092c93fa8b9bf75411280a "" + "figures/fig_invariance_thresh-lp_noise_appendix.pdf" 1777031888.645 1484986 be874f4bea496315d46a234002a56649 "" + "figures/fig_invariance_thresh-lp_pure_appendix.pdf" 1777031869.54421 1387834 c66ab20b4634e644a5fa0c5034a755d0 "" "figures/fig_invariance_thresh_lp_single.pdf" 1776784894.60717 858048 b9f3ca65b0b098193ad6aaf282d96ee5 "" - "figures/fig_invariance_thresh_lp_species.pdf" 1776785408.28555 1599362 3bc91ff9752b78e2ce4d30fcc1c0b218 "" + "figures/fig_invariance_thresh_lp_species.pdf" 1776786332.58869 1607791 1f0bca0808347bbf2397b0d6c1523c31 "" "figures/fig_noise_env_sd_conversion_appendix.pdf" 1776328774.43347 45466 c2be20312c1572203bdbeb9c8e32525e "" "figures/fig_pre_stages.pdf" 1774002992.74268 449426 5762be15627fe5d8b6d108b7ea18db44 "" "figures/fig_saturation_log-hp_appendix.pdf" 1776785249.70227 28579 fadbe904f2452a45ddcbe039438cf4d4 "" - "main.aux" 1776785447.75013 15966 435cd0a2c2d30540fb65f62a3ed1812a "pdflatex" - "main.bbl" 1776700064.4732 91039 1380dc8c93d2855fdb132cc5a40ad52f "biber main" - "main.run.xml" 1776785447.75813 2335 
a049bc26a7f032e842ce55de5bc38328 "pdflatex" - "main.tex" 1776761684.33358 49568 1208d661b3e7714265686ed03fb2017c "" + "main.aux" 1777031926.77959 16336 edd7f74d92f16bef6aeb665a1e6af779 "pdflatex" + "main.bbl" 1776940218.28593 91039 1380dc8c93d2855fdb132cc5a40ad52f "biber main" + "main.run.xml" 1777031926.78659 2335 a049bc26a7f032e842ce55de5bc38328 "pdflatex" + "main.tex" 1777031925.5956 50083 fda38e01bf1bcfd1210581259b2830ea "" (generated) "main.aux" "main.bcf" diff --git a/main.fls b/main.fls index eb3599b..807ea43 100644 --- a/main.fls +++ b/main.fls @@ -313,6 +313,11 @@ INPUT ./figures/fig_invariance_short_Omocestus_rufipes.pdf INPUT ./figures/fig_invariance_short_Omocestus_rufipes.pdf INPUT ./figures/fig_invariance_short_Omocestus_rufipes.pdf INPUT ./figures/fig_invariance_short_Omocestus_rufipes.pdf +INPUT ./figures/fig_invariance_field.pdf +INPUT ./figures/fig_invariance_field.pdf +INPUT ./figures/fig_invariance_field.pdf +INPUT ./figures/fig_invariance_field.pdf +INPUT ./figures/fig_invariance_field.pdf INPUT ./figures/fig_noise_env_sd_conversion_appendix.pdf INPUT ./figures/fig_noise_env_sd_conversion_appendix.pdf INPUT ./figures/fig_noise_env_sd_conversion_appendix.pdf @@ -328,11 +333,16 @@ INPUT ./figures/fig_saturation_log-hp_appendix.pdf INPUT ./figures/fig_saturation_log-hp_appendix.pdf INPUT ./figures/fig_saturation_log-hp_appendix.pdf INPUT ./figures/fig_saturation_log-hp_appendix.pdf -INPUT ./figures/fig_invariance_thresh-lp_appendix.pdf -INPUT ./figures/fig_invariance_thresh-lp_appendix.pdf -INPUT ./figures/fig_invariance_thresh-lp_appendix.pdf -INPUT ./figures/fig_invariance_thresh-lp_appendix.pdf -INPUT ./figures/fig_invariance_thresh-lp_appendix.pdf +INPUT ./figures/fig_invariance_thresh-lp_pure_appendix.pdf +INPUT ./figures/fig_invariance_thresh-lp_pure_appendix.pdf +INPUT ./figures/fig_invariance_thresh-lp_pure_appendix.pdf +INPUT ./figures/fig_invariance_thresh-lp_pure_appendix.pdf +INPUT ./figures/fig_invariance_thresh-lp_pure_appendix.pdf 
+INPUT ./figures/fig_invariance_thresh-lp_noise_appendix.pdf +INPUT ./figures/fig_invariance_thresh-lp_noise_appendix.pdf +INPUT ./figures/fig_invariance_thresh-lp_noise_appendix.pdf +INPUT ./figures/fig_invariance_thresh-lp_noise_appendix.pdf +INPUT ./figures/fig_invariance_thresh-lp_noise_appendix.pdf INPUT main.aux INPUT main.run.xml OUTPUT main.run.xml diff --git a/main.log b/main.log index 3c5244f..bf17698 100644 --- a/main.log +++ b/main.log @@ -1,4 +1,4 @@ -This is pdfTeX, Version 3.141592653-2.6-1.40.25 (TeX Live 2023/Debian) (preloaded format=pdflatex 2025.10.28) 21 APR 2026 17:30 +This is pdfTeX, Version 3.141592653-2.6-1.40.25 (TeX Live 2023/Debian) (preloaded format=pdflatex 2025.10.28) 24 APR 2026 13:58 entering extended mode restricted \write18 enabled. file:line:error style messages enabled. @@ -601,6 +601,9 @@ LaTeX Warning: Label `' multiply defined. LaTeX Warning: Label `' multiply defined. +LaTeX Warning: Label `' multiply defined. + + LaTeX Warning: Label `' multiply defined. ) @@ -801,37 +804,47 @@ File: figures/fig_invariance_full_Omocestus_rufipes.pdf Graphic file (type pdf) Package pdftex.def Info: figures/fig_invariance_full_Omocestus_rufipes.pdf used on input line 716. (pdftex.def) Requested size: 483.69687pt x 483.69566pt. [15 <./figures/fig_invariance_thresh_lp_species.pdf>] [16 <./figures/fig_invariance_full_Omocestus_rufipes.pdf>] - + File: figures/fig_invariance_short_Omocestus_rufipes.pdf Graphic file (type pdf) Package pdftex.def Info: figures/fig_invariance_short_Omocestus_rufipes.pdf used on input line 726. +(pdftex.def) Requested size: 483.69687pt x 483.69566pt. + +File: figures/fig_invariance_field.pdf Graphic file (type pdf) + +Package pdftex.def Info: figures/fig_invariance_field.pdf used on input line 736. (pdftex.def) Requested size: 483.69687pt x 483.69566pt. 
[17 - <./figures/fig_invariance_short_Omocestus_rufipes.pdf>] [18] [19] - + <./figures/fig_invariance_short_Omocestus_rufipes.pdf>] [18 <./figures/fig_invariance_field.pdf>] [19] [20] + File: figures/fig_noise_env_sd_conversion_appendix.pdf Graphic file (type pdf) -Package pdftex.def Info: figures/fig_noise_env_sd_conversion_appendix.pdf used on input line 885. +Package pdftex.def Info: figures/fig_noise_env_sd_conversion_appendix.pdf used on input line 895. (pdftex.def) Requested size: 483.69687pt x 241.84782pt. - [20] - + [21] + File: figures/fig_invariance_log-hp_appendix.pdf Graphic file (type pdf) -Package pdftex.def Info: figures/fig_invariance_log-hp_appendix.pdf used on input line 894. +Package pdftex.def Info: figures/fig_invariance_log-hp_appendix.pdf used on input line 904. (pdftex.def) Requested size: 483.69687pt x 241.84782pt. - + File: figures/fig_saturation_log-hp_appendix.pdf Graphic file (type pdf) -Package pdftex.def Info: figures/fig_saturation_log-hp_appendix.pdf used on input line 903. +Package pdftex.def Info: figures/fig_saturation_log-hp_appendix.pdf used on input line 913. (pdftex.def) Requested size: 483.69687pt x 241.84782pt. - [21 <./figures/fig_noise_env_sd_conversion_appendix.pdf> <./figures/fig_invariance_log-hp_appendix.pdf>] - -File: figures/fig_invariance_thresh-lp_appendix.pdf Graphic file (type pdf) - -Package pdftex.def Info: figures/fig_invariance_thresh-lp_appendix.pdf used on input line 912. + [22 <./figures/fig_noise_env_sd_conversion_appendix.pdf> <./figures/fig_invariance_log-hp_appendix.pdf>] + +File: figures/fig_invariance_thresh-lp_pure_appendix.pdf Graphic file (type pdf) + +Package pdftex.def Info: figures/fig_invariance_thresh-lp_pure_appendix.pdf used on input line 922. (pdftex.def) Requested size: 483.69687pt x 241.84782pt. 
- [22 <./figures/fig_saturation_log-hp_appendix.pdf> <./figures/fig_invariance_thresh-lp_appendix.pdf>] (./main.aux) + +File: figures/fig_invariance_thresh-lp_noise_appendix.pdf Graphic file (type pdf) + +Package pdftex.def Info: figures/fig_invariance_thresh-lp_noise_appendix.pdf used on input line 931. +(pdftex.def) Requested size: 483.69687pt x 241.84782pt. + [23 <./figures/fig_saturation_log-hp_appendix.pdf> <./figures/fig_invariance_thresh-lp_pure_appendix.pdf>] [24 <./figures/fig_invariance_thresh-lp_noise_appendix.pdf>] (./main.aux) *********** LaTeX2e <2023-11-01> patch level 1 L3 programming layer <2024-01-22> @@ -845,18 +858,18 @@ Package logreq Info: Writing requests to 'main.run.xml'. ) Here is how much of TeX's memory you used: - 20791 strings out of 474222 - 449772 string characters out of 5748732 + 20806 strings out of 474222 + 450527 string characters out of 5748732 1937975 words of memory out of 5000000 - 42781 multiletter control sequences out of 15000+600000 + 42796 multiletter control sequences out of 15000+600000 569394 words of font info for 79 fonts, out of 8000000 for 9000 1143 hyphenation exceptions out of 8191 94i,18n,93p,1496b,1740s stack positions out of 10000i,1000n,20000p,200000b,200000s -Output written on main.pdf (22 pages, 26092062 bytes). +Output written on main.pdf (24 pages, 47693796 bytes). PDF statistics: - 1889 PDF objects out of 2073 (max. 8388607) - 939 compressed objects within 10 object streams + 2027 PDF objects out of 2073 (max. 8388607) + 977 compressed objects within 10 object streams 0 named destinations out of 1000 (max. 500000) - 73 words of extra memory for PDF output out of 10000 (max. 10000000) + 83 words of extra memory for PDF output out of 10000 (max. 
10000000) diff --git a/main.pdf b/main.pdf index e4e5850..2854207 100644 Binary files a/main.pdf and b/main.pdf differ diff --git a/main.synctex.gz b/main.synctex.gz index 028d111..d14b39c 100644 Binary files a/main.synctex.gz and b/main.synctex.gz differ diff --git a/main.tex b/main.tex index 088b8b6..eb636c8 100644 --- a/main.tex +++ b/main.tex @@ -717,7 +717,7 @@ the signal for reliable song recognition. \caption{\textbf{Step-wise emergence of intensity invariant song representation along the model pathway.} } - \label{fig:inv_thresh-lp_full} + \label{fig:inv_full} \end{figure} \FloatBarrier @@ -727,7 +727,17 @@ the signal for reliable song recognition. \caption{\textbf{Step-wise emergence of intensity invariant song representation along the model pathway.} } - \label{fig:inv_thresh-lp_short} + \label{fig:inv_short} +\end{figure} +\FloatBarrier + +\begin{figure}[!ht] + \centering + \includegraphics[width=\textwidth]{figures/fig_invariance_field.pdf} + \caption{\textbf{Step-wise emergence of intensity invariant song + representation along the model pathway.} + } + \label{fig:inv_field} \end{figure} \FloatBarrier @@ -909,7 +919,16 @@ initiation of one behavior over another is categorical (e.g. 
approach/stay) \begin{figure}[!ht] \centering - \includegraphics[width=\textwidth]{figures/fig_invariance_thresh-lp_appendix.pdf} + \includegraphics[width=\textwidth]{figures/fig_invariance_thresh-lp_pure_appendix.pdf} + \caption{\textbf{} + } + \label{} +\end{figure} +\FloatBarrier + +\begin{figure}[!ht] + \centering + \includegraphics[width=\textwidth]{figures/fig_invariance_thresh-lp_noise_appendix.pdf} \caption{\textbf{} } \label{} diff --git a/python/collect_inv_data_field.py b/python/collect_inv_data_field.py index 1501e88..d71601d 100644 --- a/python/collect_inv_data_field.py +++ b/python/collect_inv_data_field.py @@ -15,7 +15,7 @@ for i, species in enumerate(target_species): print(f'Processing {species}') # Fetch all species-specific song files: - all_paths = search_files(species, ext='npz', dir=search_path) + all_paths = search_files(species, excl='merged_noise',ext='npz', dir=search_path) if not all_paths: continue diff --git a/python/collect_inv_data_log-hp.py b/python/collect_inv_data_log-hp.py index 3611e2b..90fe3fc 100644 --- a/python/collect_inv_data_log-hp.py +++ b/python/collect_inv_data_log-hp.py @@ -1,7 +1,6 @@ import numpy as np from thunderhopper.filetools import search_files from thunderhopper.modeltools import load_data, save_data -from misc_functions import sort_files_by_rec from IPython import embed # GENERAL SETTINGS: diff --git a/python/collect_inv_data_rect-lp.py b/python/collect_inv_data_rect-lp.py new file mode 100644 index 0000000..f3633e7 --- /dev/null +++ b/python/collect_inv_data_rect-lp.py @@ -0,0 +1,51 @@ +import numpy as np +from thunderhopper.filetools import search_files +from thunderhopper.modeltools import load_data, save_data +from IPython import embed + +# GENERAL SETTINGS: +mode = ['pure', 'noise'][1] +target_species = [ + 'Chorthippus_biguttulus', + 'Chorthippus_mollis', + 'Chrysochraon_dispar', + 'Euchorthippus_declivus', + 'Gomphocerippus_rufus', + 'Omocestus_rufipes', + 'Pseudochorthippus_parallelus', +] +stages = 
['filt', 'env'] +search_path = '../data/inv/rect_lp/' +save_path = '../data/inv/rect_lp/collected/' + +# EXECUTION: +for i, species in enumerate(target_species): + print(f'Processing {species}') + + # Fetch all species-specific song files: + all_paths = search_files(species, incl=mode, ext='npz', dir=search_path) + + # Run through files: + for j, path in enumerate(all_paths): + + # Load invariance data: + data, config = load_data(path, 'scales', 'measure') + + if j == 0: + # Prepare species-specific storage: + species_data = dict(scales=data['scales']) + for stage in stages: + mkey = f'measure_{stage}' + shape = data[mkey].shape + (len(all_paths),) + species_data[mkey] = np.zeros(shape, dtype=float) + + # Log species data: + for stage in stages: + mkey = f'measure_{stage}' + species_data[mkey][..., j] = data[mkey] + + # Save collected file data: + save_name = save_path + species + '_' + mode + save_data(save_name, species_data, config, overwrite=True) + +print('Done.') diff --git a/python/collect_inv_data_short.py b/python/collect_inv_data_short.py index 29b4423..ab7d3f1 100644 --- a/python/collect_inv_data_short.py +++ b/python/collect_inv_data_short.py @@ -13,7 +13,7 @@ target_species = [ 'Omocestus_rufipes', 'Pseudochorthippus_parallelus', ] -stages = ['filt', 'env', 'conv', 'feat'] +stages = ['filt', 'env', 'inv', 'conv', 'feat'] search_path = '../data/inv/short/' save_path = '../data/inv/short/collected/' diff --git a/python/condense_inv_data_field.py b/python/condense_inv_data_field.py index d569374..3dc2482 100644 --- a/python/condense_inv_data_field.py +++ b/python/condense_inv_data_field.py @@ -42,6 +42,7 @@ target_species = ['Pseudochorthippus_parallelus'] mode = ['song', 'noise'][0] stages = ['raw', 'filt', 'env', 'log', 'inv', 'conv', 'feat'] search_path = f'../data/inv/field/{mode}/' +ref_path = f'../data/inv/field/ref_measures.npz' save_path = f'../data/inv/field/{mode}/condensed/' sources = [ 'JJ', @@ -53,16 +54,27 @@ normalization = 'none' if mode 
== 'song': normalization = [ 'none', - # 'base', + 'min', + 'max', + 'base', 'range' - ][-1] + ][1] +suffix = dict( + none='_unnormed', + min='_norm-min', + max='_norm-max', + base='_norm-base', + range='_norm-range' +)[normalization] +if normalization == 'base': + ref_data = dict(np.load(ref_path)) # EXECUTION: for i, species in enumerate(target_species): print(f'Processing {species}') # Fetch all species-specific song files: - all_paths = search_files(species, ext='npz', dir=search_path) + all_paths = search_files(species, excl='merged_noise', ext='npz', dir=search_path) if not all_paths: continue @@ -94,7 +106,17 @@ for i, species in enumerate(target_species): for stage in stages: mkey = f'measure_{stage}' - if normalization == 'range': + if normalization == 'min': + # Minimum normalization: + data[mkey] /= data[mkey].min(axis=0, keepdims=True) + elif normalization == 'max': + # Maximum normalization: + data[mkey] /= data[mkey].max(axis=0, keepdims=True) + elif normalization == 'base': + # Noise baseline normalization: + data[mkey] /= ref_data[stage] + # data[mkey] /= data[mkey][0] + elif normalization == 'range': # Min-max normalization: min_measure = data[mkey].min(axis=0, keepdims=True) max_measure = data[mkey].max(axis=0, keepdims=True) @@ -106,18 +128,15 @@ for i, species in enumerate(target_species): for stage in stages: rec_mean[f'mean_{stage}'][..., j] = np.nanmean(file_data[stage], axis=-1) rec_sd[f'sd_{stage}'][..., j] = np.nanstd(file_data[stage], axis=-1) + if len(sorted_paths) == 1: + # Prune recording dimension for single recording: + rec_mean[f'mean_{stage}'] = rec_mean[f'mean_{stage}'][..., 0] + rec_sd[f'sd_{stage}'] = rec_sd[f'sd_{stage}'][..., 0] # Save condensed recording data: - save_name = save_path + species - if normalization == 'none': - save_name += '_unnormed' - elif normalization == 'base': - save_name += '_norm-base' - elif normalization == 'range': - save_name += '_norm-range' archive = dict(distances=data['distances']) 
archive.update(rec_mean) archive.update(rec_sd) - save_data(save_name, archive, config, overwrite=True) + save_data(save_path + species + suffix, archive, config, overwrite=True) print('Done.') diff --git a/python/condense_inv_data_full.py b/python/condense_inv_data_full.py index a3ffcfc..b4b8e15 100644 --- a/python/condense_inv_data_full.py +++ b/python/condense_inv_data_full.py @@ -28,9 +28,18 @@ save_path = '../data/inv/full/condensed/' # ANALYSIS SETTINGS: normalization = [ 'none', + 'min', + 'max', 'base', - 'range' + 'range', ][2] +suffix = dict( + none='_unnormed', + min='_norm-min', + max='_norm-max', + base='_norm-base', + range='_norm-range' +)[normalization] # EXECUTION: for i, species in enumerate(target_species): @@ -69,7 +78,13 @@ for i, species in enumerate(target_species): for stage in stages: mkey = f'measure_{stage}' - if normalization == 'base': + if normalization == 'min': + # Minimum normalization: + data[mkey] /= data[mkey].min(axis=0, keepdims=True) + elif normalization == 'max': + # Maximum normalization: + data[mkey] /= data[mkey].max(axis=0, keepdims=True) + elif normalization == 'base': # Noise baseline normalization: data[mkey] /= data[mkey][0] elif normalization == 'range': @@ -86,16 +101,9 @@ for i, species in enumerate(target_species): rec_sd[f'sd_{stage}'][..., j] = np.nanstd(file_data[stage], axis=-1) # Save condensed recording data: - save_name = save_path + species - if normalization == 'none': - save_name += '_unnormed' - elif normalization == 'base': - save_name += '_norm-base' - elif normalization == 'range': - save_name += '_norm-range' archive = dict(scales=data['scales']) archive.update(rec_mean) archive.update(rec_sd) - save_data(save_name, archive, config, overwrite=True) + save_data(save_path + species + suffix, archive, config, overwrite=True) print('Done.') diff --git a/python/condense_inv_data_log-hp.py b/python/condense_inv_data_log-hp.py index 5c87b41..e525699 100644 --- a/python/condense_inv_data_log-hp.py +++ 
b/python/condense_inv_data_log-hp.py @@ -26,7 +26,21 @@ search_path = '../data/inv/log_hp/' save_path = '../data/inv/log_hp/condensed/' # ANALYSIS SETTINGS: -compute_ratios = True +mode = 'noise' +normalization = [ + 'none', + 'min', + 'max', + 'base', + 'range', + ][3] +suffix = dict( + none='_unnormed', + min='_norm-min', + max='_norm-max', + base='_norm-base', + range='_norm-range' +)[normalization] plot_overview = True # PREPARATION: @@ -44,7 +58,7 @@ for i, species in enumerate(target_species): axes[0, i].set_title(shorten_species(species)) # Fetch all species-specific song files: - all_paths = search_files(species, incl='noise', ext='npz', dir=search_path) + all_paths = search_files(species, incl=mode, ext='npz', dir=search_path) # Sort song files by recording (one or more per source): sorted_paths = sort_files_by_rec(all_paths, sources) @@ -57,10 +71,6 @@ for i, species in enumerate(target_species): data, config = load_data(path, ['scales', 'measure_inv']) scales, measure = data['scales'], data['measure_inv'] - # Relate to noise: - if compute_ratios: - measure /= measure[0] - if k == 0: # Prepare song file-specific storage: file_data = np.zeros((scales.size, len(rec_paths)), dtype=float) @@ -70,6 +80,21 @@ for i, species in enumerate(target_species): rec_sd = np.zeros((scales.size, len(sorted_paths)), dtype=float) # Log song file data: + if normalization == 'min': + # Minimum normalization: + measure /= measure.min(axis=0, keepdims=True) + elif normalization == 'max': + # Maximum normalization: + measure /= measure.max(axis=0, keepdims=True) + elif normalization == 'base': + # Noise baseline normalization: + measure /= measure[0] + elif normalization == 'range': + # Min-max normalization: + min_measure = measure.min(axis=0, keepdims=True) + max_measure = measure.max(axis=0, keepdims=True) + measure = (measure - min_measure) / (max_measure - min_measure) + file_data[:, k] = measure if plot_overview: @@ -85,8 +110,9 @@ for i, species in 
enumerate(target_species): rec_mean[:, j] + rec_sd[:, j], color='k', alpha=0.2) # Save condensed recording data for current species: + save_name = save_path + species + '_' + mode + suffix archive = dict(scales=scales, mean_inv=rec_mean, sd_inv=rec_sd) - save_data(save_path + species, archive, config, overwrite=True) + save_data(save_name, archive, config, overwrite=True) if plot_overview: spec_mean = rec_mean.mean(axis=1) diff --git a/python/condense_inv_data_rect-lp.py b/python/condense_inv_data_rect-lp.py new file mode 100644 index 0000000..5a41f83 --- /dev/null +++ b/python/condense_inv_data_rect-lp.py @@ -0,0 +1,109 @@ +import numpy as np +from thunderhopper.filetools import search_files +from thunderhopper.modeltools import load_data, save_data +from misc_functions import sort_files_by_rec +from IPython import embed + +# GENERAL SETTINGS: +target_species = [ + 'Chorthippus_biguttulus', + 'Chorthippus_mollis', + 'Chrysochraon_dispar', + 'Euchorthippus_declivus', + 'Gomphocerippus_rufus', + 'Omocestus_rufipes', + 'Pseudochorthippus_parallelus', +] +sources = [ + 'BM04', + 'BM93', + 'DJN', + 'GBC', + 'FTN' +] +stages = ['filt', 'env'] +search_path = '../data/inv/rect_lp/' +save_path = '../data/inv/rect_lp/condensed/' + +# ANALYSIS SETTINGS: +mode = ['pure', 'noise'][1] +normalization = [ + 'none', + 'min', + 'max', + 'base', + 'range', + ][3] +suffix = dict( + none='_unnormed', + min='_norm-min', + max='_norm-max', + base='_norm-base', + range='_norm-range' +)[normalization] + +# EXECUTION: +for i, species in enumerate(target_species): + print(f'Processing {species}') + + # Fetch all species-specific song files: + all_paths = search_files(species, incl=mode, ext='npz', dir=search_path) + + # Sort song files by recording (one or more per source): + sorted_paths = sort_files_by_rec(all_paths, sources) + + # Condense across song files per recording: + for j, rec_paths in enumerate(sorted_paths): + for k, path in enumerate(rec_paths): + + # Load invariance data: + 
data, config = load_data(path, 'scales', 'measure') + + if k == 0: + # Prepare song file-specific storage: + file_data = {} + for stage in stages: + shape = data[f'measure_{stage}'].shape + (len(rec_paths),) + file_data[stage] = np.zeros(shape, dtype=float) + if j == 0: + # Prepare recording-specific storage: + rec_mean, rec_sd = {}, {} + for stage in stages: + shape = data[f'measure_{stage}'].shape + (len(sorted_paths),) + rec_mean[f'mean_{stage}'] = np.zeros(shape, dtype=float) + rec_sd[f'sd_{stage}'] = np.zeros(shape, dtype=float) + + # Log song file data: + for stage in stages: + mkey = f'measure_{stage}' + + if normalization == 'min': + # Minimum normalization: + data[mkey] /= data[mkey].min(axis=0, keepdims=True) + elif normalization == 'max': + # Maximum normalization: + data[mkey] /= data[mkey].max(axis=0, keepdims=True) + elif normalization == 'base': + # Noise baseline normalization: + data[mkey] /= data[mkey][0] + elif normalization == 'range': + # Min-max normalization: + min_measure = data[mkey].min(axis=0, keepdims=True) + max_measure = data[mkey].max(axis=0, keepdims=True) + data[mkey] = (data[mkey] - min_measure) / (max_measure - min_measure) + + file_data[stage][..., k] = data[mkey] + + # Get recording statistics: + for stage in stages: + rec_mean[f'mean_{stage}'][..., j] = np.nanmean(file_data[stage], axis=-1) + rec_sd[f'sd_{stage}'][..., j] = np.nanstd(file_data[stage], axis=-1) + + # Save condensed recording data: + archive = dict(scales=data['scales']) + archive.update(rec_mean) + archive.update(rec_sd) + save_name = save_path + species + '_' + mode + suffix + save_data(save_name, archive, config, overwrite=True) + +print('Done.') diff --git a/python/condense_inv_data_short.py b/python/condense_inv_data_short.py index 334c549..264d23c 100644 --- a/python/condense_inv_data_short.py +++ b/python/condense_inv_data_short.py @@ -21,16 +21,25 @@ sources = [ 'GBC', 'FTN' ] -stages = ['filt', 'env', 'conv', 'feat'] +stages = ['filt', 'env', 'inv', 
'conv', 'feat'] search_path = '../data/inv/short/' save_path = '../data/inv/short/condensed/' # ANALYSIS SETTINGS: normalization = [ 'none', + 'min', + 'max', 'base', - 'range' - ][1] + 'range', + ][2] +suffix = dict( + none='_unnormed', + min='_norm-min', + max='_norm-max', + base='_norm-base', + range='_norm-range' +)[normalization] # EXECUTION: for i, species in enumerate(target_species): @@ -69,7 +78,13 @@ for i, species in enumerate(target_species): for stage in stages: mkey = f'measure_{stage}' - if normalization == 'base': + if normalization == 'min': + # Minimum normalization: + data[mkey] /= data[mkey].min(axis=0, keepdims=True) + elif normalization == 'max': + # Maximum normalization: + data[mkey] /= data[mkey].max(axis=0, keepdims=True) + elif normalization == 'base': # Noise baseline normalization: data[mkey] /= data[mkey][0] elif normalization == 'range': @@ -86,16 +101,9 @@ for i, species in enumerate(target_species): rec_sd[f'sd_{stage}'][..., j] = np.nanstd(file_data[stage], axis=-1) # Save condensed recording data: - save_name = save_path + species - if normalization == 'none': - save_name += '_unnormed' - elif normalization == 'base': - save_name += '_norm-base' - elif normalization == 'range': - save_name += '_norm-range' archive = dict(scales=data['scales']) archive.update(rec_mean) archive.update(rec_sd) - save_data(save_name, archive, config) + save_data(save_path + species + suffix, archive, config) print('Done.') diff --git a/python/condense_inv_data_thresh-lp.py b/python/condense_inv_data_thresh-lp.py index 7e4b82f..048e08a 100644 --- a/python/condense_inv_data_thresh-lp.py +++ b/python/condense_inv_data_thresh-lp.py @@ -26,7 +26,21 @@ search_path = '../data/inv/thresh_lp/' save_path = '../data/inv/thresh_lp/condensed/' # ANALYSIS SETTINGS: -with_noise = False +mode = ['pure', 'noise'][1] +normalization = [ + 'none', + 'min', + 'max', + 'base', + 'range', + ][0] +suffix = dict( + none='_unnormed', + min='_norm-min', + max='_norm-max', + 
base='_norm-base', + range='_norm-range' +)[normalization] plot_overview = False thresh_rel = np.array([0.5, 1, 3]) @@ -53,8 +67,7 @@ for i, species in enumerate(target_species): all_axes[thresh][0, i].set_title(shorten_species(species)) # Fetch all species-specific song files: - incl = 'noise' if with_noise else 'pure' - all_paths = search_files(species, incl=incl, ext='npz', dir=search_path) + all_paths = search_files(species, incl=mode, ext='npz', dir=search_path) # Sort song files by recording (one or more per source): sorted_paths = sort_files_by_rec(all_paths, sources) @@ -78,6 +91,21 @@ for i, species in enumerate(target_species): rec_sd = np.zeros(shape, dtype=float) # Log song file data: + if normalization == 'min': + # Minimum normalization: + measure /= measure.min(axis=0, keepdims=True) + elif normalization == 'max': + # Maximum normalization: + measure /= measure.max(axis=0, keepdims=True) + elif normalization == 'base': + # Noise baseline normalization: + measure /= measure[0] + elif normalization == 'range': + # Min-max normalization: + min_measure = measure.min(axis=0, keepdims=True) + max_measure = measure.max(axis=0, keepdims=True) + measure = (measure - min_measure) / (max_measure - min_measure) + file_data[..., k] = measure if plot_overview: @@ -100,11 +128,7 @@ for i, species in enumerate(target_species): axes[1, i].fill_between(scales, *spread, color=c, alpha=0.2) # Save condensed recording data: - save_name = save_path + species - if with_noise: - save_name += '_noise' - else: - save_name += '_pure' + save_name = save_path + species + '_' + mode + suffix archive = dict( scales=scales, mean_feat=rec_mean, diff --git a/python/fig_invariance_field.py b/python/fig_invariance_field.py new file mode 100644 index 0000000..83b99e2 --- /dev/null +++ b/python/fig_invariance_field.py @@ -0,0 +1,433 @@ +import plotstyle_plt +import numpy as np +import matplotlib.pyplot as plt +from itertools import product +from thunderhopper.filetools import 
search_files +from thunderhopper.modeltools import load_data +from thunderhopper.filtertools import find_kern_specs +from misc_functions import get_saturation +from color_functions import load_colors +from plot_functions import hide_axis, reorder_by_sd, ylimits, super_xlabel,\ + ylabel, title_subplot, plot_line, time_bar,\ + assign_colors, letter_subplot, letter_subplots +from IPython import embed + +def plot_snippets(axes, time, snippets, ymin=None, ymax=None, **kwargs): + ymin, ymax = ylimits(snippets, minval=ymin, maxval=ymax, pad=0.05) + handles = [] + for i, ax in enumerate(axes): + handles.append(plot_line(ax, time, snippets[:, ..., i], + ymin=ymin, ymax=ymax, **kwargs)) + return handles + +def plot_curves(ax, scales, measures, fill_kwargs={}, **kwargs): + if measures.ndim == 1: + ax.plot(scales, measures, **kwargs)[0] + return measures + median_measure = np.median(measures, axis=1) + spread_measure = [np.percentile(measures, 25, axis=1), + np.percentile(measures, 75, axis=1)] + ax.plot(scales, median_measure, **kwargs)[0] + ax.fill_between(scales, *spread_measure, **fill_kwargs) + return median_measure + +def reduce_kernel_set(data, inds, keyword, stages=['conv', 'feat']): + for stage in stages: + key = f'{keyword}_{stage}' + data[key] = data[key][:, inds, ...] + return data + +def crop_noise_snippets(snippets, nin, nout, stages=['filt', 'env', 'log', 'inv', 'conv', 'feat']): + half_offset = int((nin - nout) / 2) + segment = np.arange(half_offset, half_offset + nout) + for stage in stages: + key = f'snip_{stage}' + snippets[key] = snippets[key][segment, ...] 
+ return snippets + + +# GENERAL SETTINGS: +search_target = 'Pseudochorthippus_parallelus' +stages = ['filt', 'env', 'log', 'inv', 'conv', 'feat'] +song_example = 'Pseudochorthippus_parallelus_micarray-short_JJ_20240815T160355-20240815T160755-1m10s690ms-1m13s614ms' +noise_example = 'merged_noise' +song_path = '../data/inv/field/song/' +noise_path = '../data/inv/field/noise/' +raw_path = search_files(search_target, incl='unnormed', dir=song_path + 'condensed/')[0] +base_path = search_files(search_target, incl='base', dir=song_path + 'condensed/')[0] +range_path = search_files(search_target, incl='range', dir=song_path + 'condensed/')[0] +song_snip_path = search_files(song_example, dir=song_path)[0] +noise_snip_path = search_files(noise_example, dir=noise_path)[0] +save_path = '../figures/fig_invariance_field.pdf' + +# ANALYSIS SETTINGS: +offset_distance = 10 # centimeter + +# SUBSET SETTINGS: +types = np.array([1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 7, -7, 8, -8, 9, -9, 10, -10]) +sigmas = np.array([0.001, 0.002, 0.004, 0.008, 0.016, 0.032]) +# types = [1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 7, -7, 8, -8, 9, -9, 10, -10] +# sigmas = [0.001, 0.002, 0.004, 0.008, 0.016, 0.032] +kernels = np.array([ + [1, 0.002], + [-1, 0.002], + [2, 0.004], + [-2, 0.004], + [3, 0.032], + [-3, 0.032] +]) +kernels = None + +# GRAPH SETTINGS: +fig_kwargs = dict( + figsize=(32/2.54, 32/2.54), +) +super_grid_kwargs = dict( + nrows=2, + ncols=1, + wspace=0, + hspace=0, + left=0, + right=1, + bottom=0, + top=1, + height_ratios=[3, 2] +) +subfig_specs = dict( + snip=(0, 0), + big=(1, 0), +) +snip_grid_kwargs = dict( + nrows=len(stages), + ncols=None, + wspace=0.1, + hspace=0.4, + left=0.11, + right=0.98, + bottom=0.08, + top=0.95 +) +big_grid_kwargs = dict( + nrows=1, + ncols=3, + wspace=0.4, + hspace=0, + left=snip_grid_kwargs['left'], + right=snip_grid_kwargs['right'], + bottom=0.13, + top=0.98 +) + +# PLOT SETTINGS: +fs = dict( + lab_norm=16, + lab_tex=20, + letter=22, + tit_norm=16, 
+ tit_tex=20, + bar=16, +) +colors = load_colors('../data/stage_colors.npz') +conv_colors = load_colors('../data/conv_colors_all.npz') +feat_colors = load_colors('../data/feat_colors_all.npz') +lw = dict( + filt=0.25, + env=0.25, + log=0.25, + inv=0.25, + conv=0.25, + feat=1, + big=3, + plateau=1.5, +) +xlabels = dict( + big='distance [cm]', +) +ylabels = dict( + filt='$x_{\\text{filt}}$', + env='$x_{\\text{env}}$', + log='$x_{\\text{db}}$', + inv='$x_{\\text{adapt}}$', + conv='$c_i$', + feat='$f_i$', + big=['measure', 'rel. measure', 'norm. measure'] +) +xlab_big_kwargs = dict( + y=0, + fontsize=fs['lab_norm'], + ha='center', + va='bottom', +) +ylab_snip_kwargs = dict( + x=0, + fontsize=fs['lab_tex'], + rotation=0, + ha='left', + va='center' +) +ylab_big_kwargs = dict( + x=-0.2, + fontsize=fs['lab_norm'], + ha='center', + va='bottom', +) +yloc = dict( + filt=0.03, + env=0.01, + log=50, + inv=20, + conv=1, + feat=1, +) +title_kwargs = dict( + x=0.5, + yref=1, + ha='center', + va='top', + fontsize=fs['tit_norm'], +) +letter_snip_kwargs = dict( + x=0, + yref=0.5, + ha='left', + va='center', + fontsize=fs['letter'], +) +letter_big_kwargs = dict( + x=0, + y=1, + ha='left', + va='bottom', + fontsize=fs['letter'], +) +song_bar_time = 1 +song_bar_kwargs = dict( + dur=song_bar_time, + y0=-0.25, + y1=-0.1, + xshift=1, + color='k', + lw=0, + clip_on=False, + text_pos=(-0.1, 0.5), + text_str=f'${song_bar_time}\\,\\text{{s}}$', + text_kwargs=dict( + fontsize=fs['bar'], + ha='right', + va='center', + ) +) +noise_bar_time = 0.5 +noise_bar_kwargs = song_bar_kwargs.copy() +noise_bar_kwargs['dur'] = noise_bar_time +noise_bar_kwargs['text_str'] = f'${int(1000 * noise_bar_time)}\\,\\text{{ms}}$' +plateau_settings = dict( + low=0.05, + high=0.95, + first=True, + last=True, + condense=None, +) +plateau_line_kwargs = dict( + lw=lw['plateau'], + ls='--', + zorder=1, +) +plateau_dot_kwargs = dict( + marker='o', + markersize=8, + markeredgewidth=1, + clip_on=False, +) + +# EXECUTION: + +# 
Load raw (unnormed) invariance data: +data, config = load_data(raw_path, files='distances', keywords='mean') +dists = data['distances'] + offset_distance + +# Load snippet data: +song_snip, _ = load_data(song_snip_path, keywords='snip') +t_song = np.arange(song_snip['snip_filt'].shape[0]) / config['rate'] +noise_snip, _ = load_data(noise_snip_path, keywords='snip') +noise_snip = crop_noise_snippets(noise_snip, noise_snip['snip_filt'].shape[0], t_song.size) +t_noise = np.arange(noise_snip['snip_filt'].shape[0]) / config['rate'] +snip_dists = ['noise'] + [f'{int(d)}$\\,$cm' for d in dists] + +# Optional kernel subset: +reduce_kernels = False +if any(var is not None for var in [kernels, types, sigmas]): + kern_inds = find_kern_specs(config['k_specs'], kernels, types, sigmas) + data = reduce_kernel_set(data, kern_inds, keyword='mean') + song_snip = reduce_kernel_set(song_snip, kern_inds, keyword='snip') + noise_snip = reduce_kernel_set(noise_snip, kern_inds, keyword='snip') + config['k_specs'] = config['k_specs'][kern_inds, :] + config['kernels'] = config['kernels'][:, kern_inds] + reduce_kernels = True + +# Adjust grid parameters: +snip_grid_kwargs['ncols'] = len(snip_dists) + +# Prepare overall graph: +fig = plt.figure(**fig_kwargs) +super_grid = fig.add_gridspec(**super_grid_kwargs) + +# Prepare stage-specific snippet axes: +snip_subfig = fig.add_subfigure(super_grid[subfig_specs['snip']]) +snip_grid = snip_subfig.add_gridspec(**snip_grid_kwargs) +snip_axes = np.zeros((snip_grid.nrows, snip_grid.ncols), dtype=object) +for i, j in product(range(snip_grid.nrows), range(snip_grid.ncols)): + ax = snip_subfig.add_subplot(snip_grid[i, j]) + ax.yaxis.set_major_locator(plt.MultipleLocator(yloc[stages[i]])) + hide_axis(ax, 'bottom') + if i == 0: + title = title_subplot(ax, snip_dists[j], ref=snip_subfig, **title_kwargs) + if j == 0: + ax.set_xlim(t_noise[0], t_noise[-1]) + ylabel(ax, ylabels[stages[i]], **ylab_snip_kwargs, transform=snip_subfig.transSubfigure) + else: + 
ax.set_xlim(t_song[0], t_song[-1]) + hide_axis(ax, 'left') + snip_axes[i, j] = ax +time_bar(snip_axes[-1, -1], **song_bar_kwargs) +# time_bar(snip_axes[-1, 0], **noise_bar_kwargs) +letter_subplot(snip_subfig, 'a', ref=title, **letter_snip_kwargs) + +# Prepare analysis axes: +big_subfig = fig.add_subfigure(super_grid[subfig_specs['big']]) +big_grid = big_subfig.add_gridspec(**big_grid_kwargs) +big_axes = np.zeros((big_grid.ncols,), dtype=object) +for i in range(big_grid.ncols): + ax = big_subfig.add_subplot(big_grid[0, i]) + ax.set_xlim(dists[0], 0) + # ax.set_xscale('symlog', linthresh=offset_distance, linscale=0.5) + ax.set_yscale('symlog', linthresh=0.01, linscale=0.1) + ylabel(ax, ylabels['big'][i], **ylab_big_kwargs) + # if i < (big_grid.ncols - 1): + # ax.set_ylim(scales[0], scales[-1]) + # else: + # ax.set_ylim(0, 1) + big_axes[i] = ax +super_xlabel(xlabels['big'], big_subfig, big_axes[0], big_axes[-1], **xlab_big_kwargs) +letter_subplots(big_axes, 'bcd', **letter_big_kwargs) + +if True: + # Plot filtered snippets: + plot_snippets(snip_axes[0, 1:], t_song, song_snip['snip_filt'], + c=colors['filt'], lw=lw['filt']) + plot_line(snip_axes[0, 0], t_noise, noise_snip['snip_filt'][:, 0], + *snip_axes[0, 1].get_ylim(), c=colors['filt'], lw=lw['filt']) + + # Plot envelope snippets: + plot_snippets(snip_axes[1, 1:], t_song, song_snip['snip_env'], + ymin=0, c=colors['env'], lw=lw['env']) + plot_line(snip_axes[1, 0], t_noise, noise_snip['snip_env'][:, 0], + *snip_axes[1, 1].get_ylim(), c=colors['env'], lw=lw['env']) + + # Plot logarithmic snippets: + plot_snippets(snip_axes[2, 1:], t_song, song_snip['snip_log'], + c=colors['log'], lw=lw['log']) + plot_line(snip_axes[2, 0], t_noise, noise_snip['snip_log'][:, 0], + *snip_axes[2, 1].get_ylim(), c=colors['log'], lw=lw['log']) + + # Plot invariant snippets: + plot_snippets(snip_axes[3, 1:], t_song, song_snip['snip_inv'], + c=colors['inv'], lw=lw['inv']) + plot_line(snip_axes[3, 0], t_noise, noise_snip['snip_inv'][:, 0], + 
*snip_axes[3, 1].get_ylim(), c=colors['inv'], lw=lw['inv']) + + # Plot kernel response snippets: + all_handles = plot_snippets(snip_axes[4, 1:], t_song, song_snip['snip_conv'], + c=colors['conv'], lw=lw['conv']) + for i, handles in enumerate(all_handles): + assign_colors(handles, config['k_specs'][:, 0], conv_colors) + reorder_by_sd(handles, song_snip['snip_conv'][..., i]) + handles = plot_line(snip_axes[4, 0], t_noise, noise_snip['snip_conv'][:, 0], + *snip_axes[4, 1].get_ylim(), c=colors['conv'], lw=lw['conv']) + assign_colors(handles, config['k_specs'][:, 0], conv_colors) + reorder_by_sd(handles, noise_snip['snip_conv'][:, 0]) + + # Plot feature snippets: + all_handles = plot_snippets(snip_axes[5, 1:], t_song, song_snip['snip_feat'], + ymin=0, ymax=1, c=colors['feat'], lw=lw['feat']) + for i, handles in enumerate(all_handles): + assign_colors(handles, config['k_specs'][:, 0], feat_colors) + reorder_by_sd(handles, song_snip['snip_feat'][..., i]) + handles = plot_line(snip_axes[5, 0], t_noise, noise_snip['snip_feat'][:, 0], + ymin=0, ymax=1, c=colors['feat'], lw=lw['feat']) + assign_colors(handles, config['k_specs'][:, 0], feat_colors) + reorder_by_sd(handles, noise_snip['snip_feat'][:, 0]) +del song_snip, noise_snip + +# Remember saturation points: +crit_inds, crit_dists = {}, {} + +# Unnormed measures: +for stage in stages: + # Plot average intensity measure across recordings: + curve = plot_curves(big_axes[0], dists, data[f'mean_{stage}'], + c=colors[stage], lw=lw['big'], + fill_kwargs=dict(color=colors[stage], alpha=0.25)) + # # Indicate saturation point: + # if stage in ['log', 'inv', 'conv', 'feat']: + # ind = get_saturation(curve, **plateau_settings)[1] + # dist = dists[ind] + # big_axes[0].plot(dist, 0, c='w', alpha=1, zorder=5.5, **plateau_dot_kwargs, + # transform=big_axes[0].get_xaxis_transform()) + # big_axes[0].plot(dist, 0, mfc=colors[stage], mec='k', alpha=0.75, zorder=6, **plateau_dot_kwargs, + # transform=big_axes[0].get_xaxis_transform()) + # 
big_axes[0].vlines(dist, big_axes[0].get_ylim()[0], curve[ind], + # color=colors[stage], **plateau_line_kwargs) + # # Log saturation point: + # crit_inds[stage] = ind + # crit_dists[stage] = dist +del data + +# Noise baseline-related measures: +data, _ = load_data(base_path, files='scales', keywords='mean') +if reduce_kernels: + data = reduce_kernel_set(data, kern_inds, keyword='mean') +for stage in stages: + # Plot average intensity measure across recordings: + curve = plot_curves(big_axes[1], dists, data[f'mean_{stage}'], + c=colors[stage], lw=lw['big'], + fill_kwargs=dict(color=colors[stage], alpha=0.25)) + # Indicate saturation point: + # if stage in ['log', 'inv', 'conv', 'feat']: + # ind, dist = crit_inds[stage], crit_dists[stage] + # big_axes[1].plot(dist, 0, c='w', alpha=1, zorder=5.5, **plateau_dot_kwargs, + # transform=big_axes[1].get_xaxis_transform()) + # big_axes[1].plot(dist, 0, mfc=colors[stage], mec='k', alpha=0.75, zorder=6, **plateau_dot_kwargs, + # transform=big_axes[1].get_xaxis_transform()) + # big_axes[1].vlines(dist, big_axes[1].get_ylim()[0], curve[ind], + # color=colors[stage], **plateau_line_kwargs) +del data + +# Min-max normalized measures: +data, _ = load_data(range_path, files='scales', keywords='mean') +if reduce_kernels: + data = reduce_kernel_set(data, kern_inds, keyword='mean') +for stage in stages: + # Plot average intensity measure across recordings: + curve = plot_curves(big_axes[2], dists, data[f'mean_{stage}'], + c=colors[stage], lw=lw['big'], + fill_kwargs=dict(color=colors[stage], alpha=0.25)) + + # # Indicate saturation point: + # if stage in ['log', 'inv', 'conv', 'feat']: + # ind, dist = crit_inds[stage], crit_dists[stage] + # big_axes[2].plot(dist, 0, c='w', alpha=1, zorder=5.5, **plateau_dot_kwargs, + # transform=big_axes[2].get_xaxis_transform()) + # big_axes[2].plot(dist, 0, mfc=colors[stage], mec='k', alpha=0.75, zorder=6, **plateau_dot_kwargs, + # transform=big_axes[2].get_xaxis_transform()) + # 
big_axes[2].vlines(dist, big_axes[2].get_ylim()[0], curve[ind], + # color=colors[stage], **plateau_line_kwargs) +del data + +# Save graph: +if save_path is not None: + fig.savefig(save_path) +plt.show() + +print('Done.') +embed() diff --git a/python/fig_invariance_full.py b/python/fig_invariance_full.py index 3068b2a..1d5d314 100644 --- a/python/fig_invariance_full.py +++ b/python/fig_invariance_full.py @@ -7,16 +7,18 @@ from thunderhopper.modeltools import load_data from thunderhopper.filtertools import find_kern_specs from misc_functions import get_saturation from color_functions import load_colors -from plot_functions import hide_axis, ylimits, xlabel, ylabel, title_subplot,\ - plot_line, strip_zeros, time_bar, set_clip_box,\ +from plot_functions import hide_axis, reorder_by_sd, ylimits, super_xlabel, ylabel, title_subplot,\ + plot_line, strip_zeros, time_bar, assign_colors,\ letter_subplot, letter_subplots from IPython import embed def plot_snippets(axes, time, snippets, ymin=None, ymax=None, **kwargs): ymin, ymax = ylimits(snippets, minval=ymin, maxval=ymax, pad=0.05) + handles = [] for i, ax in enumerate(axes): - plot_line(ax, time, snippets[:, ..., i], ymin=ymin, ymax=ymax, **kwargs) - return None + handles.append(plot_line(ax, time, snippets[:, ..., i], + ymin=ymin, ymax=ymax, **kwargs)) + return handles def plot_curves(ax, scales, measures, fill_kwargs={}, **kwargs): if measures.ndim == 1: @@ -73,8 +75,8 @@ save_path = '../figures/fig_invariance_full.pdf' exclude_zero = True # SUBSET SETTINGS: -types = np.array([1, -1, 2, -2, 3, -3, 4, -4]) -sigmas = np.array([0.004, 0.008, 0.016, 0.032]) +types = np.array([1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 7, -7, 8, -8, 9, -9, 10, -10]) +sigmas = np.array([0.001, 0.002, 0.004, 0.008, 0.016, 0.032]) # types = [1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 7, -7, 8, -8, 9, -9, 10, -10] # sigmas = [0.001, 0.002, 0.004, 0.008, 0.016, 0.032] kernels = np.array([ @@ -111,20 +113,20 @@ snip_grid_kwargs = dict( ncols=None, 
wspace=0.1, hspace=0.4, - left=0.08, - right=0.95, + left=0.11, + right=0.98, bottom=0.08, top=0.95 ) big_grid_kwargs = dict( nrows=1, ncols=3, - wspace=0.2, + wspace=0.4, hspace=0, left=snip_grid_kwargs['left'], - right=0.96, - bottom=0.2, - top=0.95 + right=snip_grid_kwargs['right'], + bottom=0.13, + top=0.98 ) # PLOT SETTINGS: @@ -137,6 +139,8 @@ fs = dict( bar=16, ) colors = load_colors('../data/stage_colors.npz') +conv_colors = load_colors('../data/conv_colors_all.npz') +feat_colors = load_colors('../data/feat_colors_all.npz') lw = dict( filt=0.25, env=0.25, @@ -154,10 +158,10 @@ ylabels = dict( filt='$x_{\\text{filt}}$', env='$x_{\\text{env}}$', log='$x_{\\text{db}}$', - inv='$x_{\\text{inv}}$', + inv='$x_{\\text{adapt}}$', conv='$c_i$', feat='$f_i$', - big=['intensity', 'rel. intensity', 'norm. intensity'] + big=['measure', 'rel. measure', 'norm. measure'] ) xlab_big_kwargs = dict( y=0, @@ -173,7 +177,7 @@ ylab_snip_kwargs = dict( va='center' ) ylab_big_kwargs = dict( - x=-0.12, + x=-0.2, fontsize=fs['lab_norm'], ha='center', va='bottom', @@ -183,7 +187,7 @@ yloc = dict( env=1000, log=50, inv=20, - conv=2, + conv=1, feat=1, ) title_kwargs = dict( @@ -262,6 +266,8 @@ if any(var is not None for var in [kernels, types, sigmas]): kern_inds = find_kern_specs(config['k_specs'], kernels, types, sigmas) data = reduce_kernel_set(data, kern_inds, keyword='mean') snip = reduce_kernel_set(snip, kern_inds, keyword='snip') + config['k_specs'] = config['k_specs'][kern_inds, :] + config['kernels'] = config['kernels'][:, kern_inds] reduce_kernels = True # Adjust grid parameters: @@ -300,13 +306,13 @@ for i in range(big_grid.ncols): ax.set_xlim(scales[0], scales[-1]) ax.set_xscale('symlog', linthresh=scales[1], linscale=0.5) ax.set_yscale('symlog', linthresh=0.01, linscale=0.1) - xlabel(ax, xlabels['big'], transform=big_subfig, **xlab_big_kwargs) ylabel(ax, ylabels['big'][i], **ylab_big_kwargs) if i < (big_grid.ncols - 1): ax.set_ylim(scales[0], scales[-1]) else: 
ax.set_ylim(0, 1) big_axes[i] = ax +super_xlabel(xlabels['big'], big_subfig, big_axes[0], big_axes[-1], **xlab_big_kwargs) letter_subplots(big_axes, 'bcd', **letter_big_kwargs) if True: @@ -327,12 +333,18 @@ if True: c=colors['inv'], lw=lw['inv']) # Plot kernel response snippets: - plot_snippets(snip_axes[4, :], t_full, snip['snip_conv'], - c=colors['conv'], lw=lw['conv']) + all_handles = plot_snippets(snip_axes[4, :], t_full, snip['snip_conv'], + c=colors['conv'], lw=lw['conv']) + for i, handles in enumerate(all_handles): + assign_colors(handles, config['k_specs'][:, 0], conv_colors) + reorder_by_sd(handles, snip['snip_conv'][..., i]) # Plot feature snippets: - plot_snippets(snip_axes[5, :], t_full, snip['snip_feat'], - ymin=0, ymax=1, c=colors['feat'], lw=lw['feat']) + all_handles = plot_snippets(snip_axes[5, :], t_full, snip['snip_feat'], + ymin=0, ymax=1, c=colors['feat'], lw=lw['feat']) + for i, handles in enumerate(all_handles): + assign_colors(handles, config['k_specs'][:, 0], feat_colors) + reorder_by_sd(handles, snip['snip_feat'][..., i]) del snip # Remember saturation points: @@ -387,7 +399,7 @@ if exclude_zero: data = exclude_zero_scale(data, stages) if reduce_kernels: data = reduce_kernel_set(data, kern_inds, keyword='mean') -for stage in stages: +for stage in ['log', 'inv', 'conv', 'feat']: # Plot average intensity measure across recordings: curve = plot_curves(big_axes[2], scales, data[f'mean_{stage}'].mean(axis=-1), c=colors[stage], lw=lw['big'], diff --git a/python/fig_invariance_log-hp.py b/python/fig_invariance_log-hp.py index 3d1194f..4ac7ae2 100644 --- a/python/fig_invariance_log-hp.py +++ b/python/fig_invariance_log-hp.py @@ -270,7 +270,7 @@ plateau_dot_kwargs = dict( species_measures = {} thresh_inds = np.zeros((len(target_species),), dtype=int) for i, species in enumerate(target_species): - spec_path = search_files(species, dir='../data/inv/log_hp/condensed/')[0] + spec_path = search_files(species, incl=['noise', 'norm-base'], 
dir='../data/inv/log_hp/condensed/')[0] spec_data = dict(np.load(spec_path)) measure = spec_data['mean_inv'].mean(axis=-1) if exclude_zero: diff --git a/python/fig_invariance_log-hp_appendix.py b/python/fig_invariance_log-hp_appendix.py index a78b58c..8181760 100644 --- a/python/fig_invariance_log-hp_appendix.py +++ b/python/fig_invariance_log-hp_appendix.py @@ -108,7 +108,7 @@ for species, ax in zip(target_species, axes): color = colors[species] # Load species data: - path = search_files(species, dir=data_path)[0] + path = search_files(species, incl=['noise', 'norm-base'], dir=data_path)[0] data = dict(np.load(path)) scales = data['scales'] means = data['mean_inv'] diff --git a/python/fig_invariance_short.py b/python/fig_invariance_short.py index 7ff0ecd..6a6839b 100644 --- a/python/fig_invariance_short.py +++ b/python/fig_invariance_short.py @@ -7,16 +7,18 @@ from thunderhopper.modeltools import load_data from thunderhopper.filtertools import find_kern_specs from misc_functions import get_saturation from color_functions import load_colors -from plot_functions import hide_axis, ylimits, xlabel, ylabel, title_subplot,\ - plot_line, strip_zeros, time_bar,\ - letter_subplot, letter_subplots +from plot_functions import hide_axis, ylimits, super_xlabel, ylabel, title_subplot,\ + plot_line, strip_zeros, time_bar, assign_colors,\ + letter_subplot, letter_subplots, reorder_by_sd from IPython import embed def plot_snippets(axes, time, snippets, ymin=None, ymax=None, **kwargs): ymin, ymax = ylimits(snippets, minval=ymin, maxval=ymax, pad=0.05) + handles = [] for i, ax in enumerate(axes): - plot_line(ax, time, snippets[:, ..., i], ymin=ymin, ymax=ymax, **kwargs) - return None + handles.append(plot_line(ax, time, snippets[:, ..., i], + ymin=ymin, ymax=ymax, **kwargs)) + return handles def plot_curves(ax, scales, measures, fill_kwargs={}, **kwargs): if measures.ndim == 1: @@ -62,7 +64,7 @@ example_file = { 'Omocestus_rufipes': 'Omocestus_rufipes_DJN_32-40s724ms-48s779ms', 
'Pseudochorthippus_parallelus': 'Pseudochorthippus_parallelus_GBC_88-6s678ms-9s32.3ms' }[target_species] -stages = ['filt', 'env', 'conv', 'feat'] +stages = ['filt', 'env', 'inv', 'conv', 'feat'] raw_path = search_files(target_species, incl='unnormed', dir='../data/inv/short/condensed/')[0] base_path = search_files(target_species, incl='base', dir='../data/inv/short/condensed/')[0] range_path = search_files(target_species, incl='range', dir='../data/inv/short/condensed/')[0] @@ -111,20 +113,20 @@ snip_grid_kwargs = dict( ncols=None, wspace=0.1, hspace=0.4, - left=0.08, - right=0.95, + left=0.11, + right=0.98, bottom=0.08, top=0.95 ) big_grid_kwargs = dict( nrows=1, ncols=3, - wspace=0.2, + wspace=0.4, hspace=0, left=snip_grid_kwargs['left'], - right=0.96, - bottom=0.2, - top=0.95 + right=snip_grid_kwargs['right'], + bottom=0.13, + top=0.98 ) # PLOT SETTINGS: @@ -137,10 +139,13 @@ fs = dict( bar=16, ) colors = load_colors('../data/stage_colors.npz') +conv_colors = load_colors('../data/conv_colors_all.npz') +feat_colors = load_colors('../data/feat_colors_all.npz') lw = dict( filt=0.25, env=0.25, conv=0.25, + inv=0.25, feat=1, big=3, plateau=1.5, @@ -151,9 +156,10 @@ xlabels = dict( ylabels = dict( filt='$x_{\\text{filt}}$', env='$x_{\\text{env}}$', + inv='$x_{\\text{adapt}}$', conv='$c_i$', feat='$f_i$', - big=['intensity', 'rel. intensity', 'norm. intensity'] + big=['measure', 'rel. measure', 'norm. 
measure'] ) xlab_big_kwargs = dict( y=0, @@ -169,7 +175,7 @@ ylab_snip_kwargs = dict( va='center' ) ylab_big_kwargs = dict( - x=-0.12, + x=-0.2, fontsize=fs['lab_norm'], ha='center', va='bottom', @@ -177,6 +183,7 @@ ylab_big_kwargs = dict( yloc = dict( filt=3000, env=1000, + inv=1000, conv=30, feat=1, ) @@ -294,13 +301,13 @@ for i in range(big_grid.ncols): ax.set_xlim(scales[0], scales[-1]) ax.set_xscale('symlog', linthresh=scales[1], linscale=0.5) ax.set_yscale('symlog', linthresh=0.01, linscale=0.1) - xlabel(ax, xlabels['big'], transform=big_subfig, **xlab_big_kwargs) ylabel(ax, ylabels['big'][i], **ylab_big_kwargs) if i < (big_grid.ncols - 1): ax.set_ylim(scales[0], scales[-1]) else: ax.set_ylim(0, 1) big_axes[i] = ax +super_xlabel(xlabels['big'], big_subfig, big_axes[0], big_axes[-1], **xlab_big_kwargs) letter_subplots(big_axes, 'bcd', **letter_big_kwargs) if True: @@ -312,13 +319,23 @@ if True: plot_snippets(snip_axes[1, :], t_full, snip['snip_env'], ymin=0, c=colors['env'], lw=lw['env']) + # Plot "adapted" snippets: + plot_snippets(snip_axes[2, :], t_full, snip['snip_inv'], + c=colors['inv'], lw=lw['inv']) + # Plot kernel response snippets: - plot_snippets(snip_axes[2, :], t_full, snip['snip_conv'], - c=colors['conv'], lw=lw['conv']) + all_handles = plot_snippets(snip_axes[3, :], t_full, snip['snip_conv'], + c=colors['conv'], lw=lw['conv']) + for i, handles in enumerate(all_handles): + assign_colors(handles, config['k_specs'][:, 0], conv_colors) + reorder_by_sd(handles, snip['snip_conv'][..., i]) # Plot feature snippets: - plot_snippets(snip_axes[3, :], t_full, snip['snip_feat'], - ymin=0, ymax=1, c=colors['feat'], lw=lw['feat']) + all_handles = plot_snippets(snip_axes[4, :], t_full, snip['snip_feat'], + ymin=0, ymax=1, c=colors['feat'], lw=lw['feat']) + for i, handles in enumerate(all_handles): + assign_colors(handles, config['k_specs'][:, 0], feat_colors) + reorder_by_sd(handles, snip['snip_feat'][..., i]) del snip # Remember saturation points: @@ -373,7 
+390,7 @@ if exclude_zero: data = exclude_zero_scale(data, stages) if reduce_kernels: data = reduce_kernel_set(data, kern_inds, keyword='mean') -for stage in stages: +for stage in ['feat']: # Plot average intensity measure across recordings: curve = plot_curves(big_axes[2], scales, data[f'mean_{stage}'].mean(axis=-1), c=colors[stage], lw=lw['big'], diff --git a/python/fig_invariance_thresh-lp_appendix.py b/python/fig_invariance_thresh-lp_appendix.py index d45a961..4887eec 100644 --- a/python/fig_invariance_thresh-lp_appendix.py +++ b/python/fig_invariance_thresh-lp_appendix.py @@ -9,6 +9,7 @@ from misc_functions import shorten_species from IPython import embed # GENERAL SETTINGS: +mode = ['pure', 'noise'][1] target_species = [ 'Chorthippus_biguttulus', 'Chorthippus_mollis', @@ -19,7 +20,7 @@ target_species = [ 'Pseudochorthippus_parallelus', ] data_path = '../data/inv/thresh_lp/condensed/' -save_path = '../figures/fig_invariance_thresh-lp_appendix.pdf' +save_path = f'../figures/fig_invariance_thresh-lp_{mode}_appendix.pdf' # ANALYSIS SETTINGS: exclude_zero = True @@ -145,7 +146,7 @@ for i, (species, spec_axes) in enumerate(zip(target_species, axes.T)): title_subplot(spec_axes[0], shorten_species(species), ref=fig, **title_kwargs) # Load species data: - path = search_files(species, dir=data_path)[0] + path = search_files(species, incl=[mode, 'unnormed'], dir=data_path)[0] data, config = load_data(path, files=['scales', 'mean_feat', 'sd_feat', 'thresh_rel']) scales = data['scales'] means = data['mean_feat'] diff --git a/python/fig_invariance_thresh-lp_species.py b/python/fig_invariance_thresh-lp_species.py index e3c7965..b2d2fde 100644 --- a/python/fig_invariance_thresh-lp_species.py +++ b/python/fig_invariance_thresh-lp_species.py @@ -537,8 +537,8 @@ for i, species in enumerate(target_species): text_str=f'${spec_bar_times[species]}\\,\\text{{s}}$') # Fetch species-specific invariance files: - pure_path = search_files(species, incl='pure', 
dir='../data/inv/thresh_lp/condensed/')[0] - noise_path = search_files(species, incl='noise', dir='../data/inv/thresh_lp/condensed/')[0] + pure_path = search_files(species, incl=['pure', 'unnormed'], dir='../data/inv/thresh_lp/condensed/')[0] + noise_path = search_files(species, incl=['noise', 'unnormed'], dir='../data/inv/thresh_lp/condensed/')[0] # Load invariance data: pure_data, config = load_data(pure_path, **load_kwargs) diff --git a/python/fig_kernel_sd_perc_appendix.py b/python/fig_kernel_sd_perc_appendix.py new file mode 100644 index 0000000..375d936 --- /dev/null +++ b/python/fig_kernel_sd_perc_appendix.py @@ -0,0 +1,69 @@ +import plotstyle_plt +import numpy as np +import matplotlib.pyplot as plt +from thunderhopper.modeltools import load_data +from thunderhopper.filetools import search_files, crop_paths +from plot_functions import xlabel, ylabel +from IPython import embed + +# Analysis settings: +mode = ['thresh_lp', 'full', 'short', 'field'][3] +thresh_path = f'../data/inv/{mode}/thresholds.npz' +save_path = f'../figures/fig_kernel_sd_perc_{mode}_appendix.pdf' + +# Plot settings: +fig_kwargs = dict( + figsize=(32/2.54, 16/2.54), + nrows=1, + ncols=1, + gridspec_kw=dict( + wspace=0, + hspace=0, + left=0.09, + right=0.99, + bottom=0.11, + top=0.98, + ) +) +line_kwargs = dict( + color='black', + lw=1, + alpha=0.5, +) +xlab = '$\\text{multiple of }\\sigma_{k_i}$' +ylab = '$P\\,(c_i > \\Theta_i)$' +xlab_kwargs = dict( + y=0, + fontsize=20, + ha='center', + va='bottom', +) +ylab_kwargs = dict( + x=0, + fontsize=20, + ha='center', + va='top', +) + +# Load threshold data: +data = dict(np.load(thresh_path)) +factors = data['factors'] +perc = data['percs'] + +# Prepare graph: +fig, ax = plt.subplots(**fig_kwargs) +ax.set_xlim(factors[0], factors[-1]) +ax.set_ylim(0, 1) +ylabel(ax, ylab, transform=fig.transFigure, **ylab_kwargs) +xlabel(ax, xlab, transform=fig.transFigure, **xlab_kwargs) + +# Plotting: +ax.plot(factors, perc, **line_kwargs) + +# Save figure: 
+fig.savefig(save_path) + +plt.show() +print('Done.') + + diff --git a/python/misc_functions.py b/python/misc_functions.py index e921322..88a1267 100644 --- a/python/misc_functions.py +++ b/python/misc_functions.py @@ -1,6 +1,7 @@ import numpy as np from scipy.stats import gaussian_kde from thunderhopper.filetools import crop_paths +from IPython import embed def shorten_species(name): genus, species = name.split('_') @@ -48,6 +49,40 @@ def sort_files_by_rec(paths, sources=['BM04', 'BM93', 'DJN', 'GBC', 'FTN']): sorted_paths = [path for paths in sorted_paths.values() for path in paths] return sorted_paths +def get_thresholds(data=None, path=None, perc=None, factor=None, + direct=False, which=None): + + def get_inds(nearest, which): + if which == 'floor': + nearest[nearest < 0] = np.inf + return nearest.argmin(axis=0) + elif which == 'ceil': + nearest[nearest > 0] = -np.inf + return nearest.argmax(axis=0) + return np.abs(nearest).argmin(axis=0) + + if data is None: + # Load threshold data: + data = dict(np.load(path)) + + # From SD scaling factor: + if factor is not None: + if direct: + # Scale SDs directly by factor: + return data['sds'] * factor, factor, None + + # Link to supra-thresh proportion: + nearest = np.atleast_2d(factor) - data['factors'][:, None] + inds = get_inds(nearest, which) + factors = data['factors'][inds] + return data['sds'] * factors, factors, data['percs'][inds, :] + + # From supra-thresh proportion: + nearest = perc - data['percs'] + inds = get_inds(nearest, which) + factors = data['factors'][inds] + return data['sds'] * factors, factors, data['percs'][inds, :] + def get_histogram(data, edges=None, nbins=50, pad=0.1, shared=True): if edges is None: axis = None if shared else 0 diff --git a/python/save_field_data.py b/python/save_field_data.py index 826d319..404caf7 100644 --- a/python/save_field_data.py +++ b/python/save_field_data.py @@ -12,7 +12,7 @@ mode = ['song', 'noise'][1] input_folder = f'../data/field/raw/{mode}/' output_folder = 
f'../data/field/processed/{mode}/' stages = ['raw', 'norm'] -if False: +if True: # Overwrites edited: stages.append('songs') diff --git a/python/save_inv_data_field.py b/python/save_inv_data_field.py index 8fb64e1..460e48b 100644 --- a/python/save_inv_data_field.py +++ b/python/save_inv_data_field.py @@ -6,16 +6,20 @@ from thunderhopper.model import process_signal from IPython import embed # GENERAL SETTINGS: -target = '*' -example_file = 'Pseudochorthippus_parallelus_micarray-short_JJ_20240815T160355-20240815T160755-1m10s690ms-1m13s614ms' -mode = ['song', 'noise'][1] +mode = ['song', 'noise'][0] +example_file = dict( + song='Pseudochorthippus_parallelus_micarray-short_JJ_20240815T160355-20240815T160755-1m10s690ms-1m13s614ms', + noise='merged_noise' +)[mode] search_path = f'../data/field/processed/{mode}/' -data_paths = search_files(target, ext='npz', dir=search_path) +data_paths = search_files('*', ext='npz', dir=search_path) +ref_path = '../data/inv/field/ref_measures.npz' stages = ['raw', 'filt', 'env', 'log', 'inv', 'conv', 'feat'] save_path = f'../data/inv/field/{mode}/' # ANALYSIS SETTINGS: -distances = np.load('../data/field/recording_distances.npy') +distances = np.load('../data/field/recording_distances.npy')[::-1] +thresh_rel = 0.5 # SUBSET SETTINGS: kernels = np.array([ @@ -30,6 +34,11 @@ kernels = None types = None#np.array([-1]) sigmas = None#np.array([0.001, 0.002, 0.004, 0.008, 0.016, 0.032]) +# PREPARATION: +if thresh_rel is not None: + # Get threshold values from pure-noise response SD: + thresh_abs = np.load(ref_path)['conv'] * thresh_rel + # EXECUTION: for data_path, name in zip(data_paths, crop_paths(data_paths)): save_detailed = example_file in name @@ -39,6 +48,10 @@ for data_path, name in zip(data_paths, crop_paths(data_paths)): data, config = load_data(data_path, files='raw') song, rate = data['raw'], config['rate'] + if thresh_rel is not None: + # Set kernel-specific thresholds: + config['feat_thresh'] = thresh_abs + # Reduce to kernel 
subset: if any(var is not None for var in [kernels, types, sigmas]): kern_inds = find_kern_specs(config['k_specs'], kernels, types, sigmas) @@ -59,6 +72,9 @@ for data_path, name in zip(data_paths, crop_paths(data_paths)): # Process snippet: signals, rates = process_signal(config, returns=stages, signal=song, rate=rate) + for stage in stages: + # Sort largest to smallest distance: + signals[stage] = signals[stage][..., ::-1] # Store results: for stage in stages: @@ -68,6 +84,10 @@ for data_path, name in zip(data_paths, crop_paths(data_paths)): measures[mkey] = signals[stage][segment, ...].mean(axis=0) else: measures[mkey] = signals[stage][segment, ...].std(axis=0) + + if measures[mkey].ndim == 2: + # Make shape (distances, kernels): + measures[mkey] = np.moveaxis(measures[mkey], 1, 0) # Log optional snippet data: if save_detailed: diff --git a/python/save_inv_data_full.py b/python/save_inv_data_full.py index 201f1c6..645ba4b 100644 --- a/python/save_inv_data_full.py +++ b/python/save_inv_data_full.py @@ -1,9 +1,9 @@ import numpy as np -import matplotlib.pyplot as plt from thunderhopper.modeltools import load_data, save_data from thunderhopper.filetools import search_files, crop_paths from thunderhopper.filtertools import find_kern_specs from thunderhopper.model import process_signal +from thunderhopper.filters import sosfilter from misc_functions import draw_noise_segment from IPython import embed @@ -16,7 +16,7 @@ target_species = [ 'Gomphocerippus_rufus', 'Omocestus_rufipes', 'Pseudochorthippus_parallelus', -][4] +][5] example_file = { 'Chorthippus_biguttulus': 'Chorthippus_biguttulus_GBC_94-17s73.1ms-19s977ms', 'Chorthippus_mollis': 'Chorthippus_mollis_DJN_41_T28C-46s4.58ms-1m15s697ms', @@ -28,34 +28,26 @@ example_file = { }[target_species] data_paths = search_files(target_species, dir='../data/processed/') noise_path = '../data/processed/white_noise_sd-1.npz' -ref_path = '../data/inv/full/ref_measures.npz' +thresh_path = '../data/inv/full/thresholds.npz' stages 
= ['filt', 'env', 'log', 'inv', 'conv', 'feat'] +pre_stages = stages[:-1] save_path = '../data/inv/full/' # ANALYSIS SETTINGS: example_scales = np.array([0.1, 1, 10, 30, 100, 300]) scales = np.geomspace(0.01, 10000, 500) scales = np.unique(np.concatenate(([0], scales, example_scales))) -thresh_rel = 0.5 +thresh_rel = np.array([0, 0.5, 1, 1.5, 2, 2.5, 3]) # SUBSET SETTINGS: -kernels = np.array([ - [1, 0.002], - [-1, 0.002], - [2, 0.004], - [-2, 0.004], - [3, 0.032], - [-3, 0.032] -]) kernels = None -types = None#np.array([-1]) -sigmas = None#np.array([0.001, 0.002, 0.004, 0.008, 0.016, 0.032]) +types = None +sigmas = None # PREPARATION: pure_noise = np.load(noise_path)['raw'] -if thresh_rel is not None: - # Get threshold values from pure-noise response SD: - thresh_abs = np.load(ref_path)['conv'] * thresh_rel +thresh_data = dict(np.load(thresh_path)) +thresh_abs = thresh_rel[:, None] * thresh_data['sds'][None, :] # EXECUTION: for data_path, name in zip(data_paths, crop_paths(data_paths)): @@ -66,17 +58,13 @@ for data_path, name in zip(data_paths, crop_paths(data_paths)): data, config = load_data(data_path, files='raw') song, rate = data['raw'], config['rate'] - if thresh_rel is not None: - # Set kernel-specific thresholds: - config['feat_thresh'] = thresh_abs - # Reduce to kernel subset: if any(var is not None for var in [kernels, types, sigmas]): kern_inds = find_kern_specs(config['k_specs'], kernels, types, sigmas) config['kernels'] = config['kernels'][:, kern_inds] config['k_specs'] = config['k_specs'][kern_inds, :] config['k_props'] = [config['k_props'][i] for i in kern_inds] - config['feat_thresh'] = config['feat_thresh'][kern_inds] + thresh_abs = thresh_abs[:, kern_inds] # Get song segment to be analyzed: time = np.arange(song.shape[0]) / rate @@ -99,8 +87,8 @@ for data_path, name in zip(data_paths, crop_paths(data_paths)): measure_log=np.zeros(shape_low, dtype=float), measure_inv=np.zeros(shape_low, dtype=float), measure_conv=np.zeros(shape_high, 
dtype=float), - measure_feat=np.zeros(shape_high, dtype=float) - ) + measure_feat=np.zeros(shape_high + (thresh_rel.size,), dtype=float) + ) if save_detailed: # Prepare optional storage: shape_low = (song.shape[0], example_scales.size) @@ -111,7 +99,7 @@ for data_path, name in zip(data_paths, crop_paths(data_paths)): snip_log=np.zeros(shape_low, dtype=float), snip_inv=np.zeros(shape_low, dtype=float), snip_conv=np.zeros(shape_high, dtype=float), - snip_feat=np.zeros(shape_high, dtype=float) + snip_feat=np.zeros(shape_high + (thresh_rel.size,), dtype=float) ) # Execute piecewise: @@ -121,28 +109,40 @@ for data_path, name in zip(data_paths, crop_paths(data_paths)): # Rescale song and add noise: scaled = song * scale + noise - # Process mixture: - signals, rates = process_signal(config, returns=stages, + # Process mixture (excluding features): + signals, rates = process_signal(config, returns=pre_stages, signal=scaled, rate=rate) - # Store results: - for stage in stages: + # Store non-feature results: + for stage in pre_stages: # Log intensity measures: - mkey = f'measure_{stage}' - if stage == 'feat': - measures[mkey][i] = signals[stage][segment, :].mean(axis=0) - else: - measures[mkey][i] = signals[stage][segment, ...].std(axis=0) + measures[f'measure_{stage}'][i] = signals[stage][segment, ...].std(axis=0) # Log optional snippet data: if save_detailed and scale in example_scales: scale_ind = np.nonzero(example_scales == scale)[0][0] snippets[f'snip_{stage}'][:, ..., scale_ind] = signals[stage] + # Execute piecewise again: + for j, thresholds in enumerate(thresh_abs): + # Finalize processing: + feat = sosfilter((signals['conv'] > thresholds).astype(float), + rate, config['feat_fcut'], 'lp', + padtype='fixed', padlen=config['padlen']) + + # Log intensity measure: + measures['measure_feat'][i, :, j] = feat[segment, :].mean(axis=0) + + # Log optional snippet data: + if save_detailed and scale in example_scales: + snippets['snip_feat'][:, :, scale_ind, j] = feat + # Save 
analysis results: if save_path is not None: data = dict( scales=scales, example_scales=example_scales, + thresh_rel=thresh_rel, + thresh_abs=thresh_abs, ) data.update(measures) if save_detailed: diff --git a/python/save_inv_data_full_backup.py b/python/save_inv_data_full_backup.py new file mode 100644 index 0000000..cbb6547 --- /dev/null +++ b/python/save_inv_data_full_backup.py @@ -0,0 +1,152 @@ +import numpy as np +import matplotlib.pyplot as plt +from thunderhopper.modeltools import load_data, save_data +from thunderhopper.filetools import search_files, crop_paths +from thunderhopper.filtertools import find_kern_specs +from thunderhopper.model import process_signal +from misc_functions import draw_noise_segment +from IPython import embed + +# GENERAL SETTINGS: +target_species = [ + 'Chorthippus_biguttulus', + 'Chorthippus_mollis', + 'Chrysochraon_dispar', + 'Euchorthippus_declivus', + 'Gomphocerippus_rufus', + 'Omocestus_rufipes', + 'Pseudochorthippus_parallelus', +][5] +example_file = { + 'Chorthippus_biguttulus': 'Chorthippus_biguttulus_GBC_94-17s73.1ms-19s977ms', + 'Chorthippus_mollis': 'Chorthippus_mollis_DJN_41_T28C-46s4.58ms-1m15s697ms', + 'Chrysochraon_dispar': 'Chrysochraon_dispar_DJN_26_T28C_DT-32s134ms-34s432ms', + 'Euchorthippus_declivus': 'Euchorthippus_declivus_FTN_79-2s167ms-2s563ms', + 'Gomphocerippus_rufus': 'Gomphocerippus_rufus_FTN_91-3-884ms-10s427ms', + 'Omocestus_rufipes': 'Omocestus_rufipes_DJN_32-40s724ms-48s779ms', + 'Pseudochorthippus_parallelus': 'Pseudochorthippus_parallelus_GBC_88-6s678ms-9s32.3ms' +}[target_species] +data_paths = search_files(target_species, dir='../data/processed/') +noise_path = '../data/processed/white_noise_sd-1.npz' +thresh_path = '../data/inv/full/thresholds.npz' +stages = ['filt', 'env', 'log', 'inv', 'conv', 'feat'] +save_path = '../data/inv/full/' + +# ANALYSIS SETTINGS: +example_scales = np.array([0.1, 1, 10, 30, 100, 300]) +scales = np.geomspace(0.01, 10000, 500) +scales = np.unique(np.concatenate(([0], 
scales, example_scales))) +thresh_rel = 0.5 + +# SUBSET SETTINGS: +kernels = np.array([ + [1, 0.002], + [-1, 0.002], + [2, 0.004], + [-2, 0.004], + [3, 0.032], + [-3, 0.032] +]) +kernels = None +types = None#np.array([-1]) +sigmas = None#np.array([0.001, 0.002, 0.004, 0.008, 0.016, 0.032]) + +# PREPARATION: +pure_noise = np.load(noise_path)['raw'] +if thresh_rel is not None: + # Get threshold values from pure-noise response SD: + thresh_abs = np.load(thresh_path)['sds'] * thresh_rel + +# EXECUTION: +for data_path, name in zip(data_paths, crop_paths(data_paths)): + save_detailed = example_file in name + print(f'Processing {name}') + + # Get song recording (prior to anything): + data, config = load_data(data_path, files='raw') + song, rate = data['raw'], config['rate'] + + if thresh_rel is not None: + # Set kernel-specific thresholds: + config['feat_thresh'] = thresh_abs + + # Reduce to kernel subset: + if any(var is not None for var in [kernels, types, sigmas]): + kern_inds = find_kern_specs(config['k_specs'], kernels, types, sigmas) + config['kernels'] = config['kernels'][:, kern_inds] + config['k_specs'] = config['k_specs'][kern_inds, :] + config['k_props'] = [config['k_props'][i] for i in kern_inds] + config['feat_thresh'] = config['feat_thresh'][kern_inds] + + # Get song segment to be analyzed: + time = np.arange(song.shape[0]) / rate + start, end = data['songs_0'].ravel() + segment = (time >= start) & (time <= end) + + # Normalize song component: + song /= song[segment].std(axis=0) + + # Get normalized noise component: + noise = draw_noise_segment(pure_noise, song.shape[0]) + noise /= noise[segment].std() + + # Prepare storage: + shape_low = (scales.size,) + shape_high = (scales.size, config['k_specs'].shape[0]) + measures = dict( + measure_filt=np.zeros(shape_low, dtype=float), + measure_env=np.zeros(shape_low, dtype=float), + measure_log=np.zeros(shape_low, dtype=float), + measure_inv=np.zeros(shape_low, dtype=float), + measure_conv=np.zeros(shape_high, 
dtype=float), + measure_feat=np.zeros(shape_high, dtype=float) + ) + if save_detailed: + # Prepare optional storage: + shape_low = (song.shape[0], example_scales.size) + shape_high = (song.shape[0], config['k_specs'].shape[0], example_scales.size) + snippets = dict( + snip_filt=np.zeros(shape_low, dtype=float), + snip_env=np.zeros(shape_low, dtype=float), + snip_log=np.zeros(shape_low, dtype=float), + snip_inv=np.zeros(shape_low, dtype=float), + snip_conv=np.zeros(shape_high, dtype=float), + snip_feat=np.zeros(shape_high, dtype=float) + ) + + # Execute piecewise: + for i, scale in enumerate(scales): + print('Simulating scale ', scale) + + # Rescale song and add noise: + scaled = song * scale + noise + + # Process mixture: + signals, rates = process_signal(config, returns=stages, + signal=scaled, rate=rate) + # Store results: + for stage in stages: + # Log intensity measures: + mkey = f'measure_{stage}' + if stage == 'feat': + measures[mkey][i] = signals[stage][segment, :].mean(axis=0) + else: + measures[mkey][i] = signals[stage][segment, ...].std(axis=0) + + # Log optional snippet data: + if save_detailed and scale in example_scales: + scale_ind = np.nonzero(example_scales == scale)[0][0] + snippets[f'snip_{stage}'][:, ..., scale_ind] = signals[stage] + + # Save analysis results: + if save_path is not None: + data = dict( + scales=scales, + example_scales=example_scales, + ) + data.update(measures) + if save_detailed: + data.update(snippets) + save_data(save_path + name, data, config, overwrite=True) +print('Done.') +embed() diff --git a/python/save_inv_data_rect-lp.py b/python/save_inv_data_rect-lp.py new file mode 100644 index 0000000..eb36a6b --- /dev/null +++ b/python/save_inv_data_rect-lp.py @@ -0,0 +1,108 @@ +import numpy as np +from thunderhopper.modeltools import load_data, save_data +from thunderhopper.filetools import search_files, crop_paths +from thunderhopper.filters import sosfilter +from misc_functions import draw_noise_segment +from IPython import 
embed + +# GENERAL SETTINGS: +example_file = 'Omocestus_rufipes_DJN_32-40s724ms-48s779ms' +data_paths = search_files('*', excl='noise', dir='../data/processed/') +noise_path = '../data/processed/white_noise_sd-1.npz' +save_path = '../data/inv/rect_lp/' + +# ANALYSIS SETTINGS: +mode = ['pure', 'noise'][1] +example_scales = np.array([0.1, 1, 10, 30, 100, 300]) +scales = np.geomspace(0.01, 10000, 1000) +scales = np.unique(np.concatenate(([0], scales, example_scales))) +cutoffs = np.array([np.nan, 125, 250, 500]) + +# PREPARATION: +if mode == 'noise': + pure_noise = np.load(noise_path)['raw'] + +# EXECUTION: +for data_path, name in zip(data_paths, crop_paths(data_paths)): + save_detailed = example_file in name + print(f'Processing {name}') + + # Get filtered song (prior to envelope extraction): + data, config = load_data(data_path, files='raw') + song, rate = data['raw'], config['rate'] + + # Get song segment to be analyzed: + time = np.arange(song.shape[0]) / rate + start, end = data['songs_0'].ravel() + segment = (time >= start) & (time <= end) + + # Normalize song component: + song /= song[segment].std() + if mode == 'noise': + # Get normalized noise component: + noise = draw_noise_segment(pure_noise, song.shape[0]) + noise /= noise[segment].std() + + # Prepare storage: + measure_filt = np.zeros_like(scales) + measure_env = np.zeros((scales.size, len(cutoffs)), dtype=float) + if save_detailed: + # Prepare optional storage: + shape = (song.shape[0], example_scales.size) + snip_raw = np.zeros(shape) + snip_filt = np.zeros(shape) + snip_env = np.zeros(shape + (len(cutoffs),)) + + # Execute piecewise: + for i, scale in enumerate(scales): + + # Get scaled mixture: + mix = song * scale + if mode == 'noise': + mix += noise + + # Process mixture: + mix = sosfilter(mix, rate, config['bp_fcut'], 'bp', + padtype='fixed', padlen=config['padlen']) + mix_rect = np.abs(mix) + + # Store non-envelope results: + measure_filt[i] = mix[segment].std() + if save_detailed and scale in 
example_scales: + scale_ind = np.nonzero(example_scales == scale)[0][0] + snip_raw[:, scale_ind] = mix + snip_filt[:, scale_ind] = mix + + # Process piecewise again: + for j, cutoff in enumerate(cutoffs): + if np.isnan(cutoff): + mix_env = mix_rect + else: + mix_env = sosfilter(mix_rect, rate, cutoff, 'lp', + padtype='even', padlen=config['padlen']) + + # Store envelope results: + measure_env[i, j] = mix_env[segment].std() + if save_detailed and scale in example_scales: + snip_env[:, scale_ind, j] = mix_env + + # Save analysis results: + if save_path is not None: + archive = dict( + scales=scales, + example_scales=example_scales, + cutoffs=cutoffs, + measure_filt=measure_filt, + measure_env=measure_env, + ) + if save_detailed: + archive.update( + snip_raw=snip_raw, + snip_filt=snip_filt, + snip_env=snip_env, + ) + save_name = save_path + name + '_' + mode + save_data(save_name, archive, config, overwrite=True) + +print('Done.') +embed() diff --git a/python/save_inv_data_short.py b/python/save_inv_data_short.py index 42116ad..7a4aac4 100644 --- a/python/save_inv_data_short.py +++ b/python/save_inv_data_short.py @@ -17,7 +17,7 @@ target_species = [ 'Gomphocerippus_rufus', 'Omocestus_rufipes', 'Pseudochorthippus_parallelus', -][6] +][5] example_file = { 'Chorthippus_biguttulus': 'Chorthippus_biguttulus_GBC_94-17s73.1ms-19s977ms', 'Chorthippus_mollis': 'Chorthippus_mollis_DJN_41_T28C-46s4.58ms-1m15s697ms', @@ -29,7 +29,7 @@ example_file = { }[target_species] data_paths = search_files(target_species, dir='../data/processed/') noise_path = '../data/processed/white_noise_sd-1.npz' -ref_path = '../data/inv/short/ref_measures.npz' +thresh_path = '../data/inv/short/thresholds.npz' pre_stages = ['filt', 'env'] stages = pre_stages + ['inv', 'conv', 'feat'] save_path = '../data/inv/short/' @@ -38,26 +38,17 @@ save_path = '../data/inv/short/' example_scales = np.array([0.1, 1, 10, 30, 100, 300]) scales = np.geomspace(0.01, 10000, 500) scales = np.unique(np.concatenate(([0], 
scales, example_scales))) -thresh_rel = 0.5 +thresh_rel = np.array([0, 0.5, 1, 1.5, 2, 2.5, 3]) # SUBSET SETTINGS: -kernels = np.array([ - [1, 0.002], - [-1, 0.002], - [2, 0.004], - [-2, 0.004], - [3, 0.032], - [-3, 0.032] -]) kernels = None -types = None#np.array([-1]) -sigmas = None#np.array([0.001, 0.002, 0.004, 0.008, 0.016, 0.032]) +types = None +sigmas = None # PREPARATION: pure_noise = np.load(noise_path)['raw'] -if thresh_rel is not None: - # Get threshold values from pure-noise response SD: - thresh_abs = np.load(ref_path)['conv'] * thresh_rel +thresh_data = dict(np.load(thresh_path)) +thresh_abs = thresh_rel[:, None] * thresh_data['sds'][None, :] # EXECUTION: for data_path, name in zip(data_paths, crop_paths(data_paths)): @@ -68,17 +59,13 @@ for data_path, name in zip(data_paths, crop_paths(data_paths)): data, config = load_data(data_path, files='raw') song, rate = data['raw'], config['rate'] - if thresh_rel is not None: - # Set kernel-specific thresholds: - config['feat_thresh'] = thresh_abs - # Reduce to kernel subset: if any(var is not None for var in [kernels, types, sigmas]): kern_inds = find_kern_specs(config['k_specs'], kernels, types, sigmas) config['kernels'] = config['kernels'][:, kern_inds] config['k_specs'] = config['k_specs'][kern_inds, :] config['k_props'] = [config['k_props'][i] for i in kern_inds] - config['feat_thresh'] = config['feat_thresh'][kern_inds] + thresh_abs = thresh_abs[:, kern_inds] # Get song segment to be analyzed: time = np.arange(song.shape[0]) / rate @@ -100,7 +87,7 @@ for data_path, name in zip(data_paths, crop_paths(data_paths)): measure_env=np.zeros(shape_low, dtype=float), measure_inv=np.zeros(shape_low, dtype=float), measure_conv=np.zeros(shape_high, dtype=float), - measure_feat=np.zeros(shape_high, dtype=float) + measure_feat=np.zeros(shape_high + (thresh_rel.size,), dtype=float) ) if save_detailed: # Prepare optional storage: @@ -111,7 +98,7 @@ for data_path, name in zip(data_paths, crop_paths(data_paths)): 
snip_env=np.zeros(shape_low, dtype=float), snip_inv=np.zeros(shape_low, dtype=float), snip_conv=np.zeros(shape_high, dtype=float), - snip_feat=np.zeros(shape_high, dtype=float) + snip_feat=np.zeros(shape_high + (thresh_rel.size,), dtype=float) ) # Execute piecewise: @@ -129,29 +116,38 @@ for data_path, name in zip(data_paths, crop_paths(data_paths)): signals['inv'] = sosfilter(signals['env'], rate, config['inv_fcut'], 'hp', padtype='constant', padlen=config['padlen']) signals['conv'] = convolve_kernels(signals['inv'], config['kernels'], config['k_specs']) - signals['feat'] = sosfilter((signals['conv'] > config['feat_thresh']).astype(float), - rate, config['feat_fcut'], 'lp', - padtype='fixed', padlen=config['padlen']) - # Store results: - for stage in stages: + # Store non-feature results: + for stage in stages[:-1]: # Log intensity measures: - mkey = f'measure_{stage}' - if stage == 'feat': - measures[mkey][i] = signals[stage][segment, :].mean(axis=0) - else: - measures[mkey][i] = signals[stage][segment, ...].std(axis=0) + measures[f'measure_{stage}'][i] = signals[stage][segment, ...].std(axis=0) # Log optional snippet data: if save_detailed and scale in example_scales: scale_ind = np.nonzero(example_scales == scale)[0][0] snippets[f'snip_{stage}'][:, ..., scale_ind] = signals[stage] + # Execute piecewise again: + for j, thresholds in enumerate(thresh_abs): + # Finalize processing: + feat = sosfilter((signals['conv'] > thresholds).astype(float), + rate, config['feat_fcut'], 'lp', + padtype='fixed', padlen=config['padlen']) + + # Log intensity measure: + measures['measure_feat'][i, :, j] = feat[segment, :].mean(axis=0) + + # Log optional snippet data: + if save_detailed and scale in example_scales: + snippets['snip_feat'][:, :, scale_ind, j] = feat + # Save analysis results: if save_path is not None: data = dict( scales=scales, example_scales=example_scales, + thresh_rel=thresh_rel, + thresh_abs=thresh_abs, ) data.update(measures) if save_detailed: diff --git 
a/python/save_ref_measures_field.py b/python/save_ref_measures_field.py new file mode 100644 index 0000000..ff44a36 --- /dev/null +++ b/python/save_ref_measures_field.py @@ -0,0 +1,42 @@ +import numpy as np +from thunderhopper.filetools import search_files +from thunderhopper.model import process_signal +from thunderhopper.modeltools import load_data +from IPython import embed + +## SETTINGS: + +# General: +stages = ['raw', 'filt', 'env', 'log', 'inv', 'conv', 'feat'] +noise_path = search_files('merged_noise', dir='../data/field/processed/noise/')[0] +save_path = '../data/inv/field/ref_measures.npz' +channels = np.array([0, 1, 2, 3, 4, 5, 6, 7]) + +# PROCESSING: + +# Load pure-noise starter representation: +noise_data, config = load_data(noise_path, stages[0]) +# Accumulate channels in time-major order: +starter = noise_data[stages[0]][:, channels].ravel(order='F') + +# Get song segment to be analyzed: +time = np.arange(starter.shape[0]) / config['rate'] +start, end = noise_data['songs_0'].ravel() +segment = (time >= start) & (time <= end) + +# Run pipeline: +data = process_signal(config, stages, signal=starter, rate=config['rate'])[0] + +# Get measures: +measures = {} +for stage in stages: + if stage == 'feat': + measures[stage] = data[stage][segment, :].mean(axis=0) + else: + measures[stage] = data[stage][segment, ...].std(axis=0) + +# Save results: +np.savez(save_path, **measures) + +print('Done.') +embed() diff --git a/python/save_thresholds.py b/python/save_thresholds.py new file mode 100644 index 0000000..c39d380 --- /dev/null +++ b/python/save_thresholds.py @@ -0,0 +1,72 @@ +import numpy as np +from thunderhopper.filters import sosfilter +from thunderhopper.model import convolve_kernels, process_signal +from thunderhopper.modeltools import load_data +from IPython import embed + +## SETTINGS: + +# General: +mode = ['thresh_lp', 'full', 'short', 'field'][3] +if mode == 'field': + noise_path = '../data/field/processed/noise/merged_noise.npz' + channels = 
np.array([0, 1, 2, 3, 4, 5, 6, 7]) +else: + noise_path = '../data/processed/white_noise_sd-1.npz' +save_path = '../data/inv/' +start_stage = dict( + thresh_lp='inv', + full='raw', + short='raw', + field='raw' +)[mode] + +# Analysis: +factors = np.concatenate([np.arange(-4, 0, 0.01), np.arange(0, 4.01, 0.01)]) +pad = np.array([0.1, 0.9]) + +# PROCESSING: + +print(f'Fetching threshold data in {mode} mode...') + +# Load pure-noise starter representation: +noise_data, config = load_data(noise_path, start_stage) +starter = noise_data[start_stage] + +# Prepare buffered measurement segment: +pad = (pad * starter.shape[0]).astype(int) +segment = np.arange(starter.shape[0])[pad[0]:pad[1]] + +if mode != 'field': + # Normalize starter: + starter /= starter[segment].std() + +# Run (partial) pipeline: +print('Running pipeline...') +if mode == 'thresh_lp': + conv = convolve_kernels(starter, config['kernels'], config['k_specs']) +elif mode == 'full': + conv = process_signal(config, 'conv', signal=starter, rate=config['rate'])[0]['conv'] +elif mode == 'short': + env = process_signal(config, 'env', signal=starter, rate=config['rate'])[0]['env'] + inv = sosfilter(env, config['env_rate'], config['inv_fcut'], 'hp', + padtype='constant', padlen=config['padlen']) + conv = convolve_kernels(inv, config['kernels'], config['k_specs']) +elif mode == 'field': + starter = starter[:, channels].ravel(order='F') + conv = process_signal(config, 'conv', signal=starter, rate=config['rate'])[0]['conv'] + +# Get baseline kernel response SDs: +sds = conv[segment, :].std(axis=0) + +# Get corresponding supra-threshold proportions: +percs = np.zeros((len(factors), conv.shape[1])) +for i, factor in enumerate(factors): + print(f'Processing factor {i + 1} / {factors.size}...') + percs[i] = (conv > (factor * sds)).sum(axis=0) / conv.shape[0] + +# Save results: +np.savez(save_path + f'{mode}/thresholds.npz', factors=factors, sds=sds, percs=percs) + +print('Done.') +embed()