# HG changeset patch
# User Daniel Wolff
# Date 1471604826 -7200
# Node ID cc4b1211e677058c56564b02ffec23107f079845
initial commit to HG from Changeset: 646 (e263d8a21543) added further path and more save "camirversion.m"

diff -r 000000000000 -r cc4b1211e677 core/comp_partBinData_ISMIR12_01.mat
Binary file core/comp_partBinData_ISMIR12_01.mat has changed
diff -r 000000000000 -r cc4b1211e677 core/comp_partBinData_ISMIR12_02.mat
Binary file core/comp_partBinData_ISMIR12_02.mat has changed
diff -r 000000000000 -r cc4b1211e677 core/comparisons_final.csv
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/core/comparisons_final.csv	Fri Aug 19 13:07:06 2016 +0200
@@ -0,0 +1,534 @@
+"clip1_id" "clip2_id" "clip3_id" "clip1_numvotes" "clip2_numvotes" "clip3_numvotes" "clip1_mp3_path" "clip2_mp3_path" "clip3_mp3_path"
+"42344" "52148" "53079" "3" "1" "6" "8/jacob_heringman-josquin_des_prez_lute_settings-10-da_ripa__part_ii_per_illud_ave-59-88.mp3" "4/tim_rayborn-the_path_beyond-14-taqsim_ud-175-204.mp3" "9/the_wretch-ambulatory-15-release-146-175.mp3"
+"44925" "17654" "56325" "2" "0" "0" "0/american_bach_soloists-j_s__bach__cantatas_volume_v-11-weinen_klagen_sorgen_zagen_bwv_12_vi_aria__sei_getreu-30-59.mp3" "8/hybris-the_first_words-04-final_trust-146-175.mp3" "9/the_strap_ons-geeking_crime-20-pimps-59-88.mp3"
+"25699" "2619" "15184" "0" "0" "2" "b/philharmonia_baroque-beethoven_symphonies_no_3_eroica_and_no_8-06-8_symph_3rd-117-146.mp3" "4/jay_kishor-the_color_of_night-01-malkauns-1045-1074.mp3" "4/seth_carlin-schubert__works_for_solo_fortepiano-03-sonata_in_eflat_major_opus_posth__122__menuetto_allegretto-30-59.mp3"
+"2308" "57629" "44657" "0" "0" "2" "0/american_bach_soloists-joseph_haydn__masses-01-kyrie__allegro_moderato-146-175.mp3" "c/magnatune-classical-24-la_primavera_robert_jones_sweet_kate-59-88.mp3" "f/ehren_starks-lines_build_walls-10-tunnel_systems-88-117.mp3"
+"45324" "3858" "13497" "15" "14" "6" "5/burnshee_thornside-rock_this_moon-11-city_girls-30-59.mp3" "9/sitar-cd1_the_sowebo_concert-01-raga_maru_bihag_alapjay_kishor-2089-2118.mp3" "6/electric_frankenstein-conquers_the_world-03-just_like_your_mom-0-29.mp3"
+"10329" "41054" "6190" "2" "0" "1" "8/tim_rayborn-veils_of_light-02-tabak-204-233.mp3" "d/jeff_wahl-guitarscapes-09-spanish_ballad-30-59.mp3" "f/strojovna_07-dirnix-02-basetra_noje-204-233.mp3"
+"8773" "35753" "41690" "2" "0" "0" "b/satori-journey_to_other_worlds-02-other_worlds-1596-1625.mp3" "4/dj_cary-downtempo_chill_2-08-huan_ying_mr_gelatine-30-59.mp3" "7/falik-elvolution-09-what_not_things_seem_are_they-0-29.mp3"
+"46596" "37337" "26543" "11" "3" "3" "1/mijo-fata_morgana-11-relativity-88-117.mp3" "9/wicked_boy-chemistry-08-slomo-233-262.mp3" "6/strojovna_07-mirnix-06-d_a_r_k-146-175.mp3"
+"8182" "49371" "58152" "6" "2" "0" "7/monoide-zeitpunkt-02-letzter_vorschlag-0-29.mp3" "e/briddes_roune-lenten_is_come-12-worldes_blis-30-59.mp3" "d/janine_johnson-telemann_harpsichord_solos_from_der_getreue_music_meister-26-suite_sarabande_pezold-88-117.mp3"
+"55261" "51409" "44825" "2" "0" "7" "8/jacob_heringman-josquin_des_prez_lute_settings-18-newsidler__adieu_mes_amours-146-175.mp3" "5/paul_beier-simone_molinaro-14-frais___gaillard__canzone_francese_a_quattro_di_clemens_non_papa-175-204.mp3" "6/norine_braun-crow-10-wingsphoenix-59-88.mp3"
+"7268" "32689" "51710" "0" "2" "0" "6/ed_martin-luis_milan__el_maestro-02-fantasia_2-117-146.mp3" "e/burning_babylon-stereo_mash_up-07-parkton_special-175-204.mp3" "1/tim_rayborn-qadim-14-perishde__santur_persian-59-88.mp3"
+"3132" "34171"
"16140" "3" "0" "0" "2/jamie_janover-evolutions-01-outer_cape-233-262.mp3" "4/myles_cochran-marginal_street-07-wait_a_while-117-146.mp3" "f/norine_braun-and_the_mood_swings-03-wounded_little_bird-204-233.mp3" +"47390" "1628" "17122" "0" "0" "1" "6/falik-streaks_and_strokes-11-xanthanon-146-175.mp3" "8/magnatune-red_hat_summit_compilation-01-fluid__headphones-88-117.mp3" "d/ambient_teknology-the_all_seeing_eye_project-04-confusion_says-175-204.mp3" +"50779" "19859" "36049" "2" "4" "1" "2/vito_paternoster-cd2bach_cello_suites-13-suite_v_in_do_minore__prelude-88-117.mp3" "8/cargo_cult-vibrant-04-son_of_ambriel-30-59.mp3" "1/phebe_craig_and_katherine_westine-beside_themselves-08-js_bach_concerto_in_c_major_for_2_harpsichords_allegro_moderato-436-465.mp3" +"5221" "20497" "8949" "0" "0" "1" "7/roots_of_rebellion-the_looking_glass-01-the_order-175-204.mp3" "e/atomic_opera-penguin_dust-04-thirst-175-204.mp3" "c/jamie_janover-now_center_of_time-02-playa-639-668.mp3" +"45584" "8666" "5511" "0" "0" "1" "0/rocket_city_riot-middle_age_suicide-11-everyday_like_saturday_bonus_track-30-59.mp3" "d/drevo-christian_themes_in_ukrainian_folk_songs-02-oh_uncle_in_your_yard-146-175.mp3" "3/emmas_mini-beat_generation_mad_trick-01-unknown-146-175.mp3" +"696" "49895" "47234" "1" "2" "0" "3/kiev_theological_academy_and_seminary_choir-traditional_worship_singing_of_kievpechersk_lavra-01-blessed_is_the_man-0-29.mp3" "5/paul_beier-simone_molinaro-13-fantasia_decima-175-204.mp3" "a/edward_martin_and_william_bastian-virtues_and_vices-11-tobacco_tobias_hume-0-29.mp3" +"23031" "4167" "5239" "13" "2" "2" "4/seismic_anamoly-sweet_rock_candy-05-kick_in_tha_nuts-0-29.mp3" "3/etherfysh-box_of_fysh-01-sanctuary-552-581.mp3" "d/rapoon-vernal_crossing-01-the_same_river_once-88-117.mp3" +"46621" "30446" "47738" "17" "7" "18" "e/jade_leary-fossildawn-11-retroscope-175-204.mp3" "0/ammonite-reconnection-07-angel_hold_on___-291-320.mp3" "b/magnatune_compilation-rock-12-cargo_cult_our_song-0-29.mp3" +"696" "47234" "49895" "0" "0" "4" "3/kiev_theological_academy_and_seminary_choir-traditional_worship_singing_of_kievpechersk_lavra-01-blessed_is_the_man-0-29.mp3" "a/edward_martin_and_william_bastian-virtues_and_vices-11-tobacco_tobias_hume-0-29.mp3" "5/paul_beier-simone_molinaro-13-fantasia_decima-175-204.mp3" +"7546" "13981" "28007" "1" "0" "0" "9/wicked_boy-chemistry-02-give_it_to_me-146-175.mp3" "6/stargarden-ambient_excursions-03-mt_fuji-0-29.mp3" "7/claire_fitch-ambiencellist_part_ii-06-mist-146-175.mp3" +"58285" "52226" "2634" "7" "5" "14" "d/janine_johnson-telemann_harpsichord_solos_from_der_getreue_music_meister-27-suite_gigue_pezold-117-146.mp3" "1/jeff_wahl-guinevere-14-tristan_and_isolde-320-349.mp3" "4/jay_kishor-the_color_of_night-01-malkauns-1480-1509.mp3" +"23857" "20500" "8554" "0" "1" "1" "c/vito_paternoster-cd1bach_sonatas_and_partitas_for_solo_violin-05-partita_prima_in_mi_minore__allemanda-204-233.mp3" "4/the_wretch-cyst-04-this_is_terror-59-88.mp3" "d/paul_avgerinos-balancing_spheres-02-night_illusions__balancing_spheres-146-175.mp3" +"43523" "11635" "13256" "2" "0" "0" "a/asteria-soyes_loyal-10-of_a_rose_singe_we_lute_anon-88-117.mp3" "d/seismic_anamoly-afterburner-03-anthem_for_hcp-0-29.mp3" "7/aba_structure-epic-03-houseboats-233-262.mp3" +"11075" "2626" "11839" "1" "3" "4" "1/zilla-egg-02-wicker_pilots-59-88.mp3" "4/jay_kishor-the_color_of_night-01-malkauns-1248-1277.mp3" "5/rapoon-cidar-03-black_feel-88-117.mp3" +"33887" "29295" "50200" "2" "0" "0" "b/paul_beier-alessandro_piccinini-07-toccata_xvii-175-204.mp3" 
"f/rapoon-tin_of_drum-06-southbound-639-668.mp3" "b/cargo_cult-alchemy-13-matt-0-29.mp3" +"21116" "49590" "2443" "2" "9" "6" "1/solace-the_gathering_season-05-aenaem-349-378.mp3" "c/edward_martin-art_of_the_lute_in_renaissance_france-13-aupres_de_vous_sermisy-30-59.mp3" "5/thursday_group-uncle_mean-01-like_white_on_rice-117-146.mp3" +"27326" "38122" "37180" "11" "26" "7" "9/the_seldon_plan-making_circles-06-holding_patterns_are_slow-175-204.mp3" "c/rapoon-easterly_6_or_7-08-variable_2-349-378.mp3" "c/william_brooks-fowl_mouth-08-scooter-0-29.mp3" +"35492" "42331" "23020" "2" "8" "6" "d/beth_quist-shall_we_dance-08-finale-291-320.mp3" "1/richard_savino-murcia__danza_y_diferencias-10-correnta-30-59.mp3" "7/solace-vadalna-05-khataralex_mix-59-88.mp3" +"49020" "53375" "5363" "1" "0" "1" "b/hanneke_van_proosdij-harpsichord_suites_of_chambonnieres-12-suite_in_g_major__menuet-0-29.mp3" "b/janine_johnson-bach_goldberg_variations-15-variatio_14_a_2_clav-30-59.mp3" "a/ensemble_sreteniye-ancient_church_singing_of_byzantine_georgia_and_rus-01-to_thee_we_sing_bulgarian_chant_tone_legetos-88-117.mp3" +"42344" "53079" "52148" "3" "5" "1" "8/jacob_heringman-josquin_des_prez_lute_settings-10-da_ripa__part_ii_per_illud_ave-59-88.mp3" "9/the_wretch-ambulatory-15-release-146-175.mp3" "4/tim_rayborn-the_path_beyond-14-taqsim_ud-175-204.mp3" +"35224" "47678" "33174" "0" "0" "2" "4/american_bach_soloists-j_s__bach__mass_in_b_minor_cd1-08-domine_deus-30-59.mp3" "7/american_bach_soloists-j_s__bach__mass_in_b_minor_cd2-12-benedictus-117-146.mp3" "c/william_brooks-fowl_mouth-07-silent_wings-204-233.mp3" +"56338" "10625" "6094" "11" "2" "3" "0/daniel_ben_pienaar-book_2_cd2_welltempered_clavier-20-prelude_and_fugue_no__22_in_bflat_minor_bwv_891_fuga-88-117.mp3" "f/jackalopes-jacksploitation-02-those_great_big____memories-88-117.mp3" "8/skitzo-hellavator_musick-02-angels_blood-117-146.mp3" +"5783" "50307" "40683" "7" "21" "6" "a/rhonda_lorence-winter_moon-01-winter_moon-233-262.mp3" "e/magnatune_com-magnatune_at_the_cc_salon-13-one_at_a_time_burnshee_thornside-117-146.mp3" "d/beth_quist-shall_we_dance-09-ritual-291-320.mp3" +"10728" "6954" "26234" "1" "0" "1" "7/dac_crowell-the_sea_and_the_sky-02-umi_no_kami_ni_kansha-117-146.mp3" "e/solace-balance-02-dragon_and_sword-204-233.mp3" "1/tim_rayborn-qadim-06-calliopeia__kithara_ancient_greek-146-175.mp3" +"13981" "7546" "28007" "1" "1" "0" "6/stargarden-ambient_excursions-03-mt_fuji-0-29.mp3" "9/wicked_boy-chemistry-02-give_it_to_me-146-175.mp3" "7/claire_fitch-ambiencellist_part_ii-06-mist-146-175.mp3" +"26625" "47188" "55324" "0" "0" "4" "c/domased-new_memories-06-deadly_season-465-494.mp3" "9/wicked_boy-chemistry-11-the_theme-0-29.mp3" "f/philharmonia_baroque_orchestra-handel__atalanta_cd2-18-recitativo_lo_vo_morir___gia_corro___-0-29.mp3" +"20805" "13998" "46366" "11" "6" "18" "e/tilopa-out_of_the_blue-04-wings_over_water-320-349.mp3" "6/stargarden-ambient_excursions-03-mt_fuji-494-523.mp3" "7/american_bach_soloists-j_s__bach__mass_in_b_minor_cd2-11-osanna_in_exelsis-117-146.mp3" +"46491" "7744" "28319" "0" "0" "3" "7/artemis-gravity-11-prayer-233-262.mp3" "6/mercy_machine-the_devil_i_know-02-i_need_a_new_jesus-146-175.mp3" "2/ensemble_sreteniye___three_holies_church_choristers-dont_cry_rachael-06-open_to_me_the_doors_of_repentance_byzantine_chant_bulgarian_tradition-30-59.mp3" +"14539" "49873" "5223" "0" "0" "1" "d/shira_kammen-mistral-03-quand_jetais_jeune-59-88.mp3" "b/magnatune_compilation-rock-13-emmas_mini_disconnected-204-233.mp3" 
"2/jesse_manno-sea_spirits-01-the_river-0-29.mp3" +"20302" "28479" "35237" "0" "1" "2" "8/stargarden-music_for_modern_listening-04-termoli-30-59.mp3" "a/liquid_zen-magic_midsummer-06-por_tus_ojos-88-117.mp3" "f/asteria-le_souvenir_de_vous_me_tue-08-dona_gentile_lute_guillaume_dufay-59-88.mp3" +"36300" "42175" "49336" "1" "0" "0" "6/dj_markitos-slower_emotions138_bpm_remixes-08-love_peace_and_ecstasy_138_bpm_remix-262-291.mp3" "c/reza_manzoori-restrung-10-breath-30-59.mp3" "e/justin_bianco-forge-12-war-146-175.mp3" +"27871" "43565" "52571" "2" "0" "0" "4/dj_markitos-evolution_of_the_mind-06-losing_control-175-204.mp3" "4/tim_rayborn-the_path_beyond-10-osman_pasha-291-320.mp3" "7/jag-cypress_grove_blues-15-cypress_boogie-0-29.mp3" +"51816" "2536" "57384" "2" "10" "3" "0/paul_berget-the_siena_manuscript_on_steel_string_guitar-14-recercar_14_steel_string_guitar-88-117.mp3" "5/processor-are_you_for_real-01-mad_bull-204-233.mp3" "e/philharmonia_baroque_orchestra-handel__atalanta_cd1-23-recitativo_amarilli_oh_dei_qui_tirsi-30-59.mp3" +"37083" "27985" "15072" "0" "2" "1" "0/paul_berget-the_siena_manuscript_on_steel_string_guitar-08-recercar_8_steel_string_guitar-59-88.mp3" "1/william_brooks-karma_dogs-06-miracle-117-146.mp3" "4/myles_cochran-marginal_street-03-so_gone-378-407.mp3" +"27211" "1987" "11300" "1" "0" "3" "3/sherefe-opium-06-geldim_bir_kara_tasha-175-204.mp3" "e/c_layne-the_sun_will_come_out_to_blind_you-01-i_dont_care_if_you_lie-146-175.mp3" "c/lvx_nova-lvx_nova-03-kyoto_nights-639-668.mp3" +"7785" "50207" "38252" "5" "2" "2" "2/aba_structure-tektonik_illusion-02-illusion-233-262.mp3" "b/cargo_cult-alchemy-13-matt-204-233.mp3" "0/williamson-a_few_things_to_hear_before_we_all_blow_up-08-whats_on_the_ceiling_beats_whats_on_tv-291-320.mp3" +"36439" "2295" "20485" "43" "18" "5" "5/arthur_yoria-of_the_lovely-08-my_best_routines-117-146.mp3" "e/tilopa-out_of_the_blue-01-kyopianosus-88-117.mp3" "4/human_response-delirious-04-theoria-175-204.mp3" +"39639" "6259" "13759" "6" "18" "16" "e/jamie_janover_and_michael_masley-all_strings_considered-09-innerlude-30-59.mp3" "e/jamie_janover_and_michael_masley-all_strings_considered-02-birds_of_mindrise-204-233.mp3" "7/james_edwards-canarios-03-marionas__gerau-117-146.mp3" +"40792" "18557" "50294" "7" "2" "2" "4/john_williams-long_ride_home-09-she_walks-30-59.mp3" "1/touchinggrace-the_reformation_sessions-04-melon_tropic_sunrise-378-407.mp3" "2/shira_kammen-the_almanac-13-o_western_wind-59-88.mp3" +"24418" "26551" "52873" "0" "2" "0" "2/indidginus-seismic-05-shindormo-436-465.mp3" "d/rapoon-vernal_crossing-06-dahina_ta-117-146.mp3" "d/seth_and_maryse_carlin-schubert__music_for_fortepiano_four_hands-15-marche_militaire-88-117.mp3" +"37812" "18221" "58717" "7" "32" "9" "2/jami_sieber-lush_mechanique-08-the_darkening_ground-59-88.mp3" "c/seismic_anamoly-ramifications-04-julies_tune-552-581.mp3" "c/edward_martin-art_of_the_lute_in_renaissance_france-35-duel_double_duel_lupi-30-59.mp3" +"29201" "12970" "10292" "2" "0" "1" "3/les_filles_de_sainte_colombe-german_music_for_viols_and_harpsichord-06-sonata_viii_in_c_minor_schenck-494-523.mp3" "7/beight-file_in_rhythm-03-fill_me_up-0-29.mp3" "3/very_large_array-stuff-02-superdrag-146-175.mp3" +"11345" "27917" "51875" "20" "7" "0" "1/tanya_tomkins_and_eric_zivian-beethoven_piano_and_cello_works-03-7_variations_on_a_theme_from_the_magic_flute_in_eb_woo_46-204-233.mp3" "b/electric_frankenstein-dead_and_back-06-make_no_mistake-30-59.mp3" "9/the_wretch-ambulatory-14-sickness-146-175.mp3" +"27133" "36403" "41929" "0" 
"0" "1" "4/seismic_anamoly-sweet_rock_candy-06-fire_in_tha_hole-291-320.mp3" "b/elizabeth_wolff-moments_musicaux-08-moritz_moszkowski__3_moments_musicaux_op__7__con_moto-204-233.mp3" "4/john_williams-long_ride_home-10-admit_one-88-117.mp3" +"46681" "16199" "2705" "10" "25" "51" "5/paul_beier-simone_molinaro-11-rossingnolet__canzone_francese_a_quattro_di_thomas_crequillon-146-175.mp3" "3/kitka-the_vine-03-zhuravlyky_odletily_ukraine-88-117.mp3" "5/dj_markitos-unreachable_destiny-01-manteia-204-233.mp3" +"10715" "16804" "21421" "0" "1" "1" "5/stargarden-the_art_of_analog_diversion-02-trybal-262-291.mp3" "8/mercy_machine-mercy_machine-04-bones-146-175.mp3" "c/five_star_fall-automatic_ordinary-05-between_2_floors-204-233.mp3" +"28835" "17708" "13286" "1" "0" "0" "9/self_delusion-happiness_hurts_me-06-secret_life-175-204.mp3" "1/jeff_wahl-guinevere-04-freedom-30-59.mp3" "3/sherefe-opium-03-huzam_kamelieriko-59-88.mp3" +"15069" "32776" "42794" "22" "16" "12" "4/myles_cochran-marginal_street-03-so_gone-291-320.mp3" "2/duo_chambure-vihuela_duets_of_valderrabano-07-payne_trabel_jean_mouton-59-88.mp3" "9/etherine-gleam-10-gleam_remix-30-59.mp3" +"3450" "48198" "42896" "16" "15" "7" "2/maryse_carlin-rameau__pieces_de_clavecin_en_concerts__forqueray_suites_4_and_5-01-premier_concert__coulicam_rameau-30-59.mp3" "e/magnatune_compilation-high_energy_rock_and_roll-12-jackalopes_cielo_69-59-88.mp3" "c/william_brooks-blue_ribbon__the_best_of_william_brooks-10-hideaway-88-117.mp3" +"19353" "36904" "41821" "0" "1" "1" "3/etherfysh-box_of_fysh-04-rama-1132-1161.mp3" "a/janine_johnson-chopin_recital-08-polonaise_op_44_in_f_minor-552-581.mp3" "f/the_headroom_project-jetuton_andawai-10-late_night_blues-146-175.mp3" +"27534" "6408" "25167" "1" "1" "0" "a/plunkett-14_days-06-just_rise-0-29.mp3" "d/kitka-wintersongs-02-byla_cesta_moravia-30-59.mp3" "e/jeff_wahl-meditative_guitar-05-the_persistence_of_hope-117-146.mp3" +"24125" "38341" "39174" "25" "18" "14" "f/lizzi-love_and_you_and_i-05-remedy-175-204.mp3" "f/the_headroom_project-jetuton_andawai-09-anu_de_dilaho-0-29.mp3" "e/magnatune_compilation-high_energy_rock_and_roll-09-electric_frankenstein_new_rage-59-88.mp3" +"40226" "41470" "12032" "25" "4" "9" "d/drevo-christian_themes_in_ukrainian_folk_songs-09-o_did_you_learn_o_people_such_news-146-175.mp3" "b/suzanne_teng-miles_beyond-09-tiens_lullaby-175-204.mp3" "e/dac_crowell-within_this_space-03-cantillation-639-668.mp3" +"20213" "448" "33029" "1" "1" "1" "b/rob_costlow-sophomore_jinx-04-summer_garden-204-233.mp3" "b/satori-journey_to_other_worlds-01-astral_flight-813-842.mp3" "1/zilla-egg-07-rufus-88-117.mp3" +"28315" "55324" "5002" "1" "5" "2" "f/norine_braun-and_the_mood_swings-06-one_breath-146-175.mp3" "f/philharmonia_baroque_orchestra-handel__atalanta_cd2-18-recitativo_lo_vo_morir___gia_corro___-0-29.mp3" "4/dj_markitos-evolution_of_the_mind-01-sunset_endless_night_journey_remix-30-59.mp3" +"43707" "12454" "11351" "4" "1" "1" "8/justin_bianco-phoenix-10-pieces_of_every_day-59-88.mp3" "9/suzanne_teng-enchanted_wind-03-dark_moon-494-523.mp3" "1/tanya_tomkins_and_eric_zivian-beethoven_piano_and_cello_works-03-7_variations_on_a_theme_from_the_magic_flute_in_eb_woo_46-378-407.mp3" +"43612" "33478" "15969" "0" "0" "1" "8/stargarden-music_for_modern_listening-10-p5_mon_amour-175-204.mp3" "5/stargarden-step_off-07-step_out_to_the_stars-581-610.mp3" "1/zilla-egg-03-velvet_periscope-233-262.mp3" +"29527" "45860" "16281" "15" "7" "21" "c/glen_bledsoe-up_and_down-06-tango_cacafuego-146-175.mp3" 
"7/rapoon-what_do_you_suppose-11-i_dont_expect_anyone-146-175.mp3" "6/ralph_meulenbroeks-gambomania-04-sonata_in_d_minor_adagio_karl_friedrich_abel-59-88.mp3" +"20548" "23383" "53557" "12" "43" "20" "4/jami_sieber-second_sight-04-tree_of_love-233-262.mp3" "8/burning_babylon-knives_to_the_treble-05-mash_up_the_dj-117-146.mp3" "e/briddes_roune-lenten_is_come-16-brid_by_a_strem-59-88.mp3" +"21148" "32369" "22217" "1" "0" "0" "1/tim_rayborn-qadim-05-alevlen__saz_turkish-117-146.mp3" "b/elizabeth_wolff-moments_musicaux-07-moritz_moszkowski__3_moments_musicaux_op__7__allegramente-30-59.mp3" "5/curandero-aras-05-embrujada-378-407.mp3" +"6115" "20862" "30985" "0" "2" "0" "5/dj_markitos-unreachable_destiny-02-aquila-146-175.mp3" "3/sherefe-opium-04-yianni_mou_to-262-291.mp3" "f/norine_braun-and_the_mood_swings-07-cruel_streak-175-204.mp3" +"44689" "195" "22299" "4" "8" "1" "7/rob_costlow-woods_of_chaos-10-twilight-88-117.mp3" "9/the_seldon_plan-making_circles-01-a_rhyming_dictionary-175-204.mp3" "6/ed_martin-luis_milan__el_maestro-05-fantasia_5-88-117.mp3" +"78" "7807" "35976" "8" "0" "1" "9/american_bach_soloists-heinrich_schutz__musicalische_exequien-01-musicalische_exequien_swv_279_teil_i_concert_in_form_einer_teutschen_begrabnismissa-1132-1161.mp3" "0/solace-iman-02-iman-146-175.mp3" "8/mercy_machine-in_your_bed__the_remixes-08-invisible_cosmic_sea_shanty_mix-262-291.mp3" +"49311" "49514" "21556" "1" "0" "1" "a/janine_johnson-chopin_recital-12-valse_brilliante_op_18_in_eb_major-146-175.mp3" "1/tim_rayborn-qadim-13-amniat__robab_afghan-59-88.mp3" "a/james_edwards-le_tresor_dorphee_by_antoine_francisque-05-branle_simple_a_cordes_avalees_troisiesme-30-59.mp3" +"21806" "43720" "27879" "11" "4" "4" "b/philharmonia_baroque-mozart_orchestral_works-05-concerto_for_flute_and_harp_in_c_major_kv299_allegro-175-204.mp3" "5/arthur_yoria-suerte_something-10-places_everyone-59-88.mp3" "5/mrdc-plethora-06-lostcode-88-117.mp3" +"36308" "49254" "57299" "0" "0" "1" "2/version-versions-08-love_redux-117-146.mp3" "e/burning_babylon-stereo_mash_up-12-trouble_dub-59-88.mp3" "3/jag-four_strings-23-four_string_blues-30-59.mp3" +"37286" "26497" "39679" "8" "0" "3" "2/claire_fitch-ambiencellist-08-shortest_day-59-88.mp3" "b/lisa_debenedictis-mixter_one-06-cuckoo_passiveaggressive_mix_by_hisboyelroy-146-175.mp3" "0/william_brooks-bitter_circus-09-is_there_anybody_there-59-88.mp3" +"45485" "8259" "37965" "76" "18" "14" "2/magnatune_compilation-electronica-11-dj_markitos_interplanetary_travel-146-175.mp3" "9/the_seldon_plan-making_circles-02-making_circles-117-146.mp3" "f/pain_factor-8_seconds-08-thin_line-204-233.mp3" +"33237" "39060" "50855" "8" "7" "14" "4/dj_cary-downtempo_chill_2-07-son_of_ambriel_cargo_cult-0-29.mp3" "c/liquid_zen-oscilloscope-09-dense_properties-262-291.mp3" "b/richard_savino-mertz__bardic_sounds-13-tarantella-204-233.mp3" +"37661" "52495" "6809" "15" "21" "26" "1/zilla-egg-08-sun_of_many_maps-291-320.mp3" "b/richard_savino-mertz__bardic_sounds-15-childrens_fairy_tale-320-349.mp3" "2/roots_of_rebellion-surfacing-02-declaration-233-262.mp3" +"51628" "19583" "44622" "3" "6" "4" "d/shiva_in_exile-ethnic-14-myingmar-581-610.mp3" "d/tim_rayborn-chordae-04-sempr_alegria-639-668.mp3" "d/various_artists-the_art_of_persuasion-10-touch_ammonite-175-204.mp3" +"23034" "2003" "56164" "2" "0" "0" "4/seismic_anamoly-sweet_rock_candy-05-kick_in_tha_nuts-88-117.mp3" "a/tilopa-pictures_of_silence-01-ichi-146-175.mp3" "9/janine_johnson-german_keyboard_masters-20-ciacona_in_e_buxtehude-88-117.mp3" +"3892" "28337" 
"54954" "1" "1" "2" "5/sitar-first_congregational_church_concert-01-raga_patdeep__alap__jay_kishor-871-900.mp3" "6/the_kokoon-erase-06-order-59-88.mp3" "b/janine_johnson-bach_goldberg_variations-17-variatio_16_a_1_clav_ouverture-0-29.mp3" +"54283" "29638" "54881" "0" "2" "0" "e/philharmonia_baroque_orchestra-handel__atalanta_cd1-17-aria_riportai_glorioso_palma-88-117.mp3" "b/liquid_zen-elements_at_loop_10-06-the_surface-0-29.mp3" "6/doc_rossi-demarzi6_sonatas_for_cetra_o_kitara-17-the_orange_rogue_trad__arr__rossi-117-146.mp3" +"8781" "31387" "38170" "5" "3" "5" "c/rapoon-easterly_6_or_7-02-our_tresspasses-175-204.mp3" "7/aba_structure-epic-07-erased-755-784.mp3" "5/cheryl_ann_fulton-the_once_and_future_harp-08-virgin_light-0-29.mp3" +"45713" "41781" "25493" "14" "4" "4" "7/beight-file_in_rhythm-11-ghostly_feeling-88-117.mp3" "7/monoide-zeitpunkt-09-zeitpunkt-204-233.mp3" "6/drop_trio-cezanne-05-wreck_of_the_zephyr-407-436.mp3" +"1224" "7261" "15290" "1" "3" "2" "4/human_response-delirious-01-delirious-291-320.mp3" "1/richard_savino-murcia__danza_y_diferencias-02-fandango-117-146.mp3" "9/domased-slowdown-03-sound_of_ambient_part_1-175-204.mp3" +"36741" "14366" "27047" "24" "21" "21" "b/rebel_rebel-explode_into_space-08-our_time_is_now-30-59.mp3" "9/vito_paternoster-cd2bach_sonatas_and_partitas_for_solo_violin-03-partita_seconda_in_sol_minore__sarabanda-117-146.mp3" "c/rapoon-easterly_6_or_7-06-falling_more_slowly-204-233.mp3" +"20288" "8922" "37692" "1" "5" "3" "9/introspekt-ideology-04-tbd-262-291.mp3" "f/lisa_debenedictis-fruitless-02-planet_you-88-117.mp3" "7/solace-rhythm_of_the_dance-08-sword_dance_routine-204-233.mp3" +"2634" "52226" "58285" "7" "11" "12" "4/jay_kishor-the_color_of_night-01-malkauns-1480-1509.mp3" "1/jeff_wahl-guinevere-14-tristan_and_isolde-320-349.mp3" "d/janine_johnson-telemann_harpsichord_solos_from_der_getreue_music_meister-27-suite_gigue_pezold-117-146.mp3" +"32158" "26800" "25154" "0" "1" "1" "6/norine_braun-miles_to_go-07-little_lamb-88-117.mp3" "9/indidginus-sine_language-06-dubliminal-146-175.mp3" "2/shira_kammen-the_almanac-05-the_leaves_be_green-0-29.mp3" +"36931" "17220" "56532" "1" "6" "0" "e/mountain_mirrors-mountain_mirrors-08-praying_mantis-117-146.mp3" "d/paul_avgerinos-balancing_spheres-04-day_dreams__the_flames_are_awakened-88-117.mp3" "e/steven_devine-portrait_of_an_english_harpsichord-21-suite_no_5_in_e_major_allemande_george_frederick_handel-146-175.mp3" +"20327" "19482" "46159" "9" "4" "0" "6/dr_kuch-analog_disease-04-the_big_bang_theory-59-88.mp3" "4/paul_berget-j_s__bach_on_the_lute-04-sarabande__bwv_996-0-29.mp3" "a/kitka-nectar-11-miskolc_felol_hidegen_fuj_a_szel_hungary-175-204.mp3" +"33304" "23903" "38465" "1" "0" "0" "4/seth_carlin-schubert__works_for_solo_fortepiano-07-sonata_in_a_major_opus_posth__120__allegro-320-349.mp3" "5/thursday_group-uncle_mean-05-pelican_fan-407-436.mp3" "1/ambient_teknology-phoenix-09-afterburner-146-175.mp3" +"24349" "30020" "4270" "6" "30" "10" "b/oberlin_consort_of_viols-5_and_6_part_fantasies_of_william_lawes-05-set_a_5_in_c_minor-88-117.mp3" "4/myles_cochran-marginal_street-06-worlds_apart-146-175.mp3" "b/elizabeth_wolff-moments_musicaux-01-sergei_rachmaninoff__6_moments_musicaux_op__16__andantino-175-204.mp3" +"54892" "43402" "43640" "9" "8" "28" "d/drevo-christian_themes_in_ukrainian_folk_songs-17-through_the_wide_field_through_the_deep_sea-88-117.mp3" "d/the_west_exit-nocturne-10-nine_lives-204-233.mp3" "c/liquid_zen-seventythree-10-passing_cars-0-29.mp3" +"15069" "42794" "32776" "26" "19" "10" 
"4/myles_cochran-marginal_street-03-so_gone-291-320.mp3" "9/etherine-gleam-10-gleam_remix-30-59.mp3" "2/duo_chambure-vihuela_duets_of_valderrabano-07-payne_trabel_jean_mouton-59-88.mp3" +"10850" "12896" "36664" "0" "0" "3" "7/shane_jackman-equilibrium-02-uncommon_eloquence-0-29.mp3" "5/rapoon-fallen_gods-03-fallen_gods-175-204.mp3" "8/mediva-viva_mediva-08-ondas_da_mar_codax-175-204.mp3" +"37093" "21950" "43565" "0" "0" "2" "4/seismic_anamoly-sweet_rock_candy-08-refleections-146-175.mp3" "b/ehren_starks-the_depths_of_a_year-05-dads_song-233-262.mp3" "4/tim_rayborn-the_path_beyond-10-osman_pasha-291-320.mp3" +"32232" "23793" "50605" "10" "2" "12" "2/indidginus-as_above_so_below-07-machu-88-117.mp3" "3/dj_cary-eastern_grooves-05-oriental_distortionshiva_in_exile-59-88.mp3" "2/ensemble_mirable-conversations_galantes-13-sonata_ii_in_b_minor_ariagratioso_altro-88-117.mp3" +"29561" "43292" "18581" "7" "1" "13" "d/utopia_banished-night_of_the_black_wyvern-06-the_darkness_we_burn-30-59.mp3" "7/falik-elvolution-10-mediterranean_blue-262-291.mp3" "2/anup-embrace-04-minor_song-0-29.mp3" +"18587" "7531" "48098" "0" "3" "0" "2/anup-embrace-04-minor_song-175-204.mp3" "f/jacob_heringman_and_catherine_king-alonso_mudarra_songs_and_solos-02-gentil_cavallero-30-59.mp3" "3/jag-pretty_girl_blues-12-hopscotch-30-59.mp3" +"8538" "8682" "8932" "1" "1" "0" "a/tilopa-pictures_of_silence-02-ni-146-175.mp3" "0/beth_quist-silver-02-om_asatoma_sad_gamaya-233-262.mp3" "c/jamie_janover-now_center_of_time-02-playa-146-175.mp3" +"38341" "24125" "39174" "19" "14" "12" "f/the_headroom_project-jetuton_andawai-09-anu_de_dilaho-0-29.mp3" "f/lizzi-love_and_you_and_i-05-remedy-175-204.mp3" "e/magnatune_compilation-high_energy_rock_and_roll-09-electric_frankenstein_new_rage-59-88.mp3" +"39821" "34898" "48750" "2" "0" "2" "c/hands_upon_black_earth-hands_upon_black_earth-09-lastime-30-59.mp3" "c/o_fickle_fortune-a_celebration_of_robert_burns-08-bonie_wee_thing-146-175.mp3" "f/philharmonia_baroque_orchestra-handel__atalanta_cd2-12-recitativo_sono_irene_oppur_sogno-0-29.mp3" +"14378" "10167" "41899" "9" "1" "1" "5/new_york_consort_of_viols-dances_and_canzonas_of_holborne_and_brade-03-pavan_and_galliard_holborne-204-233.mp3" "d/processor-insomnia-02-straight_down-30-59.mp3" "0/williamson-a_few_things_to_hear_before_we_all_blow_up-10-a-175-204.mp3" +"6716" "4642" "16862" "0" "2" "0" "e/hans_christian-phantoms-02-coyotes_dance-88-117.mp3" "c/vito_paternoster-cd1bach_sonatas_and_partitas_for_solo_violin-01-sonata_prima_in_do_minore__adagio-117-146.mp3" "6/norine_braun-now_and_zen-04-buffalo_nights-146-175.mp3" +"22058" "31227" "43611" "0" "3" "0" "1/mijo-fata_morgana-05-deep_bass_9-146-175.mp3" "b/shane_jackman-sanctuary-07-early_frost-175-204.mp3" "8/stargarden-music_for_modern_listening-10-p5_mon_amour-146-175.mp3" +"44925" "56325" "17654" "1" "0" "0" "0/american_bach_soloists-j_s__bach__cantatas_volume_v-11-weinen_klagen_sorgen_zagen_bwv_12_vi_aria__sei_getreu-30-59.mp3" "9/the_strap_ons-geeking_crime-20-pimps-59-88.mp3" "8/hybris-the_first_words-04-final_trust-146-175.mp3" +"27133" "41929" "36403" "1" "1" "0" "4/seismic_anamoly-sweet_rock_candy-06-fire_in_tha_hole-291-320.mp3" "4/john_williams-long_ride_home-10-admit_one-88-117.mp3" "b/elizabeth_wolff-moments_musicaux-08-moritz_moszkowski__3_moments_musicaux_op__7__con_moto-204-233.mp3" +"3727" "21645" "21192" "0" "1" "1" "c/jay_kishor-cd2_the_sowebo_concert-01-raga_malkauns-2408-2437.mp3" "c/strojovna_07-number_1-05-bycygel-59-88.mp3" 
"b/la_primavera-english_renaissance_music-05-amarilli_mia_bella_caccini-30-59.mp3" +"26836" "28405" "44292" "0" "11" "1" "5/edward_martin_and_paul_berget-baroque_lute_duets-06-duets_for_2_lutes_in_a_minor_and_f_major_losy__courante-0-29.mp3" "f/strojovna_07-dirnix-06-palacinka-59-88.mp3" "e/heavy_mellow-horizons-10-spinnaker-0-29.mp3" +"27923" "49468" "19003" "1" "1" "1" "d/shiva_in_exile-ethnic-06-manju-0-29.mp3" "a/plunkett-14_days-13-14_days-88-117.mp3" "1/jacob_heringman-holburns_passion-04-pavan_posthuma_lute-204-233.mp3" +"58323" "8426" "26605" "1" "0" "0" "b/jacob_heringman-jane_pickeringes_lute_book-28-a_fantasia-88-117.mp3" "1/dac_crowell-sferica-02-murata-697-726.mp3" "3/kitka-the_vine-06-de_szeretnek_az_egen_csillag_lenni_hungary-146-175.mp3" +"27052" "12381" "47377" "2" "0" "0" "c/rapoon-easterly_6_or_7-06-falling_more_slowly-349-378.mp3" "6/dj_markitos-slower_emotions138_bpm_remixes-03-cyber_evolution_138_bpm_remix-262-291.mp3" "c/liquid_zen-seventythree-11-wobble_into_venus-349-378.mp3" +"38573" "58553" "33945" "0" "2" "0" "a/liquid_zen-magic_midsummer-09-arabia_underwater-0-29.mp3" "b/hanneke_van_proosdij-harpsichord_suites_of_chambonnieres-30-suite_in_f_major__chaconne-30-59.mp3" "5/processor-are_you_for_real-07-touch_of_god-175-204.mp3" +"1757" "27347" "41280" "1" "4" "0" "8/tim_rayborn-veils_of_light-01-gilim-0-29.mp3" "f/ivilion-sartinal-06-hshante-233-262.mp3" "6/curandero-curandero-09-teddy_bear-30-59.mp3" +"17173" "56776" "25497" "3" "6" "8" "4/paul_avgerinos-sky_of_grace-04-dance_of_life-175-204.mp3" "4/ensemble_mirable-triemer_six_cello_sonatas-21-sonata_5_largo-88-117.mp3" "6/drop_trio-cezanne-05-wreck_of_the_zephyr-523-552.mp3" +"52981" "15187" "32559" "0" "0" "3" "9/american_baroque-dances_and_suites_of_rameau_and_couperin-15-passepieds_12_suite_from_les_fetes_dhebe_rameau-117-146.mp3" "4/seth_carlin-schubert__works_for_solo_fortepiano-03-sonata_in_eflat_major_opus_posth__122__menuetto_allegretto-117-146.mp3" "6/dj_markitos-slower_emotions138_bpm_remixes-07-ocean_of_the_emotions_138_bpm_remix-175-204.mp3" +"56140" "47903" "33640" "12" "2" "8" "9/the_sarasa_ensemble-a_baroque_mosaic-20-cantata_il_delirio_amoroso__aria_handel-233-262.mp3" "a/jade_leary-the_lost_art_of_human_kindness-12-earth_beyond_a_finite_thought-146-175.mp3" "d/ambient_teknology-the_all_seeing_eye_project-07-telekonology-204-233.mp3" +"51628" "44622" "19583" "2" "3" "10" "d/shiva_in_exile-ethnic-14-myingmar-581-610.mp3" "d/various_artists-the_art_of_persuasion-10-touch_ammonite-175-204.mp3" "d/tim_rayborn-chordae-04-sempr_alegria-639-668.mp3" +"45166" "12158" "10653" "1" "1" "0" "a/janine_johnson-chopin_recital-11-ballade_op_23_in_g_minor-262-291.mp3" "2/solace-ahsas-03-circle_58_68_78-146-175.mp3" "4/justin_bianco-siren-02-token-0-29.mp3" +"31234" "40259" "18106" "2" "2" "0" "a/jade_leary-and_come_the_sirens-07-earthwish_on_saturn-30-59.mp3" "2/duo_chambure-vihuela_duets_of_valderrabano-09-obsecro_te_domina_josquin_dez_prez-88-117.mp3" "2/jamie_janover-evolutions-04-interlocken-117-146.mp3" +"30132" "31689" "6060" "1" "1" "1" "4/falling_you-touch-07-____a_cry_for_the_brokenhearted-0-29.mp3" "8/ivilion-terean-07-gortri-146-175.mp3" "2/claire_fitch-ambiencellist-02-ambiancellist-0-29.mp3" +"48344" "8688" "42533" "5" "15" "16" "6/norine_braun-crow-12-mantra-59-88.mp3" "3/mandrake_root-the_seventh_mirror-02-one_in_a_million-88-117.mp3" "1/janine_johnson-j_s__bach_works_for_harpsichord-10-english_suite_no_3_in_g_minor__prelude-59-88.mp3" +"29122" "14354" "49164" "1" "0" "1" 
"4/seth_carlin-schubert__works_for_solo_fortepiano-06-sonata_in_a_major_opus_posth__120__andante-117-146.mp3" "0/chris_harvey-the_white_sail-03-paris-262-291.mp3" "9/introspekt-ideology-12-the_language_of_light-146-175.mp3" +"35237" "28479" "20302" "4" "0" "1" "f/asteria-le_souvenir_de_vous_me_tue-08-dona_gentile_lute_guillaume_dufay-59-88.mp3" "a/liquid_zen-magic_midsummer-06-por_tus_ojos-88-117.mp3" "8/stargarden-music_for_modern_listening-04-termoli-30-59.mp3" +"13748" "29958" "28479" "7" "10" "17" "f/magnatune-relaxation_spa-03-march_thirtyone_falling_you-204-233.mp3" "d/paul_avgerinos-words_touch-06-weightless_you_are-610-639.mp3" "a/liquid_zen-magic_midsummer-06-por_tus_ojos-88-117.mp3" +"42271" "17701" "3551" "1" "0" "0" "9/the_strap_ons-geeking_crime-10-chucky_got_lucky-30-59.mp3" "5/satori-for_relaxation-04-forest_surround-117-146.mp3" "1/the_rajdhani_quartet-the_gandhi_memorial_concert-01-raga_hansa_kalyani-610-639.mp3" +"43358" "18230" "7035" "5" "4" "31" "b/jacob_heringman-jane_pickeringes_lute_book-10-my_lord_willoughbies_welcom_home_by_mr_byrde-59-88.mp3" "0/jeffrey_luck_lucas-what_we_whisper-04-just_like_moths-146-175.mp3" "9/indidginus-sine_language-02-dusty_lands-0-29.mp3" +"2423" "52368" "13041" "0" "1" "1" "c/liquid_zen-oscilloscope-01-levier_darmement-204-233.mp3" "b/altri_stromenti-uccellini-15-alla_roversa_uccellini-30-59.mp3" "8/mercy_machine-in_your_bed__the_remixes-03-fountain_street_q_burns_abstract_message_mix-88-117.mp3" +"27702" "37832" "10388" "1" "0" "1" "5/ralph_rousseau_meulenbroeks-moved_by_marais-06-le_folies_despagnes_le_deuxieme_livre_de_pieces_de_viole_1701-639-668.mp3" "f/memories_of_tomorrow-waiting_for_dawn-08-the_games_you_play_trance_mix-30-59.mp3" "f/hans_christian-surrender-02-tears_for_ma-291-320.mp3" +"909" "32451" "27858" "14" "18" "3" "7/claire_fitch-ambiencellist_part_ii-01-cellosphere-117-146.mp3" "4/justin_bianco-siren-07-never_a_light-0-29.mp3" "9/suzanne_teng-enchanted_wind-06-loltun-175-204.mp3" +"51176" "41762" "4161" "15" "4" "22" "c/strojovna_07-number_1-14-blaudisco-175-204.mp3" "d/rapoon-vernal_crossing-09-yitun-291-320.mp3" "3/etherfysh-box_of_fysh-01-sanctuary-378-407.mp3" +"39938" "41134" "158" "1" "0" "1" "8/magnatune-red_hat_summit_compilation-09-lizzi__lay_down-117-146.mp3" "b/ehren_starks-the_depths_of_a_year-09-subtle_groove-30-59.mp3" "5/new_york_consort_of_viols-dances_and_canzonas_of_holborne_and_brade-01-3_almaines_holborne-610-639.mp3" +"29295" "33887" "50200" "3" "0" "0" "f/rapoon-tin_of_drum-06-southbound-639-668.mp3" "b/paul_beier-alessandro_piccinini-07-toccata_xvii-175-204.mp3" "b/cargo_cult-alchemy-13-matt-0-29.mp3" +"49514" "21556" "49311" "0" "1" "0" "1/tim_rayborn-qadim-13-amniat__robab_afghan-59-88.mp3" "a/james_edwards-le_tresor_dorphee_by_antoine_francisque-05-branle_simple_a_cordes_avalees_troisiesme-30-59.mp3" "a/janine_johnson-chopin_recital-12-valse_brilliante_op_18_in_eb_major-146-175.mp3" +"35888" "1802" "7613" "0" "0" "2" "5/processor-are_you_for_real-08-impulse-146-175.mp3" "f/jackalopes-jacksploitation-01-good_clean_fun_laundromat-30-59.mp3" "a/the_headroom_project-ciri_a_doro-02-gunklat_sasmazor-262-291.mp3" +"36741" "27047" "14366" "26" "23" "14" "b/rebel_rebel-explode_into_space-08-our_time_is_now-30-59.mp3" "c/rapoon-easterly_6_or_7-06-falling_more_slowly-204-233.mp3" "9/vito_paternoster-cd2bach_sonatas_and_partitas_for_solo_violin-03-partita_seconda_in_sol_minore__sarabanda-117-146.mp3" +"21109" "39376" "9147" "2" "8" "5" "1/solace-the_gathering_season-05-aenaem-146-175.mp3" 
"5/arthur_yoria-suerte_something-09-greek_archaic-88-117.mp3" "a/bjorn_fogelberg-karooshi_porn-02-quite_derivative-88-117.mp3" +"18221" "37812" "58717" "40" "5" "9" "c/seismic_anamoly-ramifications-04-julies_tune-552-581.mp3" "2/jami_sieber-lush_mechanique-08-the_darkening_ground-59-88.mp3" "c/edward_martin-art_of_the_lute_in_renaissance_france-35-duel_double_duel_lupi-30-59.mp3" +"34360" "6826" "46312" "0" "0" "1" "6/ralph_meulenbroeks-gambomania-08-cors_de_chasse_in_d_major_louis_de_caix_dhervelois-117-146.mp3" "f/strojovna_07-switch_on__switch_off-02-deluxe-175-204.mp3" "a/ensemble_sreteniye-ancient_church_singing_of_byzantine_georgia_and_rus-11-o_theotokos_the_virgin_rejoice_ave_maria-1451-1480.mp3" +"10329" "6190" "41054" "0" "0" "2" "8/tim_rayborn-veils_of_light-02-tabak-204-233.mp3" "f/strojovna_07-dirnix-02-basetra_noje-204-233.mp3" "d/jeff_wahl-guitarscapes-09-spanish_ballad-30-59.mp3" +"16703" "53972" "342" "29" "1" "6" "1/dac_crowell-sferica-04-benthic-929-958.mp3" "1/tanya_tomkins_and_eric_zivian-beethoven_piano_and_cello_works-16-sonata_in_d_major_op__102_no__2_adagio_con_molto_sentimento_daffetto-59-88.mp3" "8/jacob_heringman-josquin_des_prez_lute_settings-01-anon__praeter_rerum_seriem-117-146.mp3" +"47903" "56140" "33640" "3" "11" "10" "a/jade_leary-the_lost_art_of_human_kindness-12-earth_beyond_a_finite_thought-146-175.mp3" "9/the_sarasa_ensemble-a_baroque_mosaic-20-cantata_il_delirio_amoroso__aria_handel-233-262.mp3" "d/ambient_teknology-the_all_seeing_eye_project-07-telekonology-204-233.mp3" +"54126" "52490" "10128" "0" "0" "1" "1/vito_paternoster-cd1bach_cello_suites-16-suite_vi_in_re_magiore__sarabande-117-146.mp3" "b/richard_savino-mertz__bardic_sounds-15-childrens_fairy_tale-175-204.mp3" "3/spinecar-autophile-02-stay-30-59.mp3" +"11075" "11839" "2626" "2" "5" "4" "1/zilla-egg-02-wicker_pilots-59-88.mp3" "5/rapoon-cidar-03-black_feel-88-117.mp3" "4/jay_kishor-the_color_of_night-01-malkauns-1248-1277.mp3" +"28288" "51177" "21983" "2" "21" "3" "9/lisa_debenedictis-tigers-06-ocean_in_her_head-117-146.mp3" "c/strojovna_07-number_1-14-blaudisco-204-233.mp3" "d/paul_avgerinos-balancing_spheres-05-day_dreams__and_thirst_is_quenched-552-581.mp3" +"50933" "41647" "52143" "2" "0" "0" "1/etherine-24_days-13-this_was_my_last_hope-88-117.mp3" "e/atomic_opera-penguin_dust-09-watergrave-0-29.mp3" "4/tim_rayborn-the_path_beyond-14-taqsim_ud-30-59.mp3" +"55116" "8522" "5024" "0" "2" "0" "4/john_williams-long_ride_home-18-dog_dont_hunt-175-204.mp3" "c/jay_kishor-the_payans_concert-02-nat_malhar-1683-1712.mp3" "2/indidginus-seismic-01-superstring-30-59.mp3" +"39518" "49616" "55555" "41" "6" "2" "2/burnshee_thornside-blues_and_misc-09-i_cant_say_no-204-233.mp3" "a/jacob_heringman-black_cow-13-bakfark_non_accedat_ad_te_malum_secunda_pars-204-233.mp3" "e/steven_devine-portrait_of_an_english_harpsichord-19-sonata_no_2_in_a_major_minuet_joseph_gibbs-59-88.mp3" +"46366" "13998" "20805" "24" "7" "5" "7/american_bach_soloists-j_s__bach__mass_in_b_minor_cd2-11-osanna_in_exelsis-117-146.mp3" "6/stargarden-ambient_excursions-03-mt_fuji-494-523.mp3" "e/tilopa-out_of_the_blue-04-wings_over_water-320-349.mp3" +"5453" "6992" "25493" "11" "3" "2" "f/dac_crowell-redshifted_harmonies-01-tranquilitatis-1509-1538.mp3" "5/domased-selection-02-drunk_warrior-146-175.mp3" "6/drop_trio-cezanne-05-wreck_of_the_zephyr-407-436.mp3" +"14463" "25278" "26168" "0" "5" "8" "0/daniel_ben_pienaar-book_2_cd2_welltempered_clavier-03-prelude_and_fugue_no__14_in_fsharp_minor_bwv_883_praeludium-30-59.mp3" 
"e/justin_bianco-forge-05-upon_the_shore-117-146.mp3" "a/falik-dreams_from_the_machine-06-black_llama-30-59.mp3" +"803" "34628" "25656" "0" "1" "1" "f/lisa_debenedictis-fruitless-01-brilliant_day-0-29.mp3" "4/magnatune-romantic_dinner_classical_compilation-08-american_baroque__quartet_in_g_mozart-0-29.mp3" "1/tanya_tomkins_and_eric_zivian-beethoven_piano_and_cello_works-06-11_bagatelles_for_solo_fortepiano_op__119_no__3-59-88.mp3" +"1128" "51065" "3224" "0" "2" "0" "d/ambient_teknology-the_all_seeing_eye_project-01-cyclops-523-552.mp3" "e/steven_devine-portrait_of_an_english_harpsichord-14-sonata_no_4_in_c_minor_largo_joseph_kelway-88-117.mp3" "1/solace-the_gathering_season-01-paradise_lost-204-233.mp3" +"41578" "46571" "45563" "2" "3" "3" "5/rapoon-fallen_gods-09-valley-117-146.mp3" "4/american_bach_soloists-j_s__bach__mass_in_b_minor_cd1-11-quoniam_to_solus_sanctus-146-175.mp3" "e/justin_bianco-forge-11-ether-88-117.mp3" +"20975" "44684" "47952" "2" "0" "0" "f/jackalopes-jacksploitation-05-1_percent_muthafucka-0-29.mp3" "e/jamie_janover_and_michael_masley-all_strings_considered-10-twice_versa-88-117.mp3" "a/ensemble_sreteniye-ancient_church_singing_of_byzantine_georgia_and_rus-12-eucharist_canon_fragment-175-204.mp3" +"21336" "52054" "40740" "2" "1" "16" "9/janine_johnson-german_keyboard_masters-05-auf_das_heilige_pfingstfest_pachelbel-59-88.mp3" "d/katherine_roberts_perl-j_s__bach_french_suites-14-suite_no_3_in_b_minor_bwv_814_allemande-146-175.mp3" "f/ivilion-sartinal-09-sartinal_iii-175-204.mp3" +"34893" "21576" "49025" "1" "5" "1" "c/o_fickle_fortune-a_celebration_of_robert_burns-08-bonie_wee_thing-0-29.mp3" "3/mandrake_root-the_seventh_mirror-05-build_o_burn-30-59.mp3" "2/vito_paternoster-cd2bach_cello_suites-12-suite_iv_in_mi_bemolle_maggiore__gigue-88-117.mp3" +"28337" "3892" "54954" "1" "0" "1" "6/the_kokoon-erase-06-order-59-88.mp3" "5/sitar-first_congregational_church_concert-01-raga_patdeep__alap__jay_kishor-871-900.mp3" "b/janine_johnson-bach_goldberg_variations-17-variatio_16_a_1_clav_ouverture-0-29.mp3" +"53557" "23383" "20548" "21" "37" "4" "e/briddes_roune-lenten_is_come-16-brid_by_a_strem-59-88.mp3" "8/burning_babylon-knives_to_the_treble-05-mash_up_the_dj-117-146.mp3" "4/jami_sieber-second_sight-04-tree_of_love-233-262.mp3" +"13737" "5498" "36086" "0" "2" "0" "f/thursday_group-first_album-03-lynchs_law-117-146.mp3" "c/the_art_of_the_lute_player-the_art_of_the_lute_player-01-un_gay_bergier__thomas_crecquillion__valentin_bakfark__jacob_heringman-117-146.mp3" "2/stellamara-star_of_the_sea-08-karuna-262-291.mp3" +"3083" "37706" "7457" "1" "0" "0" "5/arthur_yoria-of_the_lovely-01-of_the_lovely-146-175.mp3" "6/falik-streaks_and_strokes-08-tabouli_gumbo-30-59.mp3" "7/claire_fitch-ambiencellist_part_ii-02-four_days-204-233.mp3" +"40133" "11549" "15037" "1" "1" "1" "f/lizzi-love_and_you_and_i-09-my_destination-59-88.mp3" "a/tilopa-by_the_way-03-amigos_de_viaje-204-233.mp3" "1/spinecar-up_from_the_mud-03-smoke-0-29.mp3" +"21993" "25776" "29873" "1" "1" "6" "d/paul_avgerinos-balancing_spheres-05-day_dreams__and_thirst_is_quenched-842-871.mp3" "f/magnatune-relaxation_spa-06-after_claire_fitch-146-175.mp3" "e/touchinggrace-submission-06-watching_clouds-175-204.mp3" +"3868" "14053" "26026" "0" "2" "0" "5/sitar-first_congregational_church_concert-01-raga_patdeep__alap__jay_kishor-175-204.mp3" "9/indidginus-sine_language-03-myriad-204-233.mp3" "0/solace-iman-06-azure-30-59.mp3" +"35888" "7613" "1802" "0" "1" "0" "5/processor-are_you_for_real-08-impulse-146-175.mp3" 
"a/the_headroom_project-ciri_a_doro-02-gunklat_sasmazor-262-291.mp3" "f/jackalopes-jacksploitation-01-good_clean_fun_laundromat-30-59.mp3" +"49616" "39518" "55555" "5" "34" "6" "a/jacob_heringman-black_cow-13-bakfark_non_accedat_ad_te_malum_secunda_pars-204-233.mp3" "2/burnshee_thornside-blues_and_misc-09-i_cant_say_no-204-233.mp3" "e/steven_devine-portrait_of_an_english_harpsichord-19-sonata_no_2_in_a_major_minuet_joseph_gibbs-59-88.mp3" +"27326" "37180" "38122" "15" "10" "17" "9/the_seldon_plan-making_circles-06-holding_patterns_are_slow-175-204.mp3" "c/william_brooks-fowl_mouth-08-scooter-0-29.mp3" "c/rapoon-easterly_6_or_7-08-variable_2-349-378.mp3" +"813" "175" "12655" "0" "0" "1" "9/wicked_boy-chemistry-01-buy_my_pullovers-30-59.mp3" "e/burning_babylon-stereo_mash_up-01-7_nine_skank-0-29.mp3" "6/tom_paul-i_was_king-03-drops_that_hit_the_sand-204-233.mp3" +"45114" "38721" "7927" "0" "0" "3" "4/jami_sieber-hidden_sky-11-arms_of_the_mother-117-146.mp3" "6/sand_craig_and_dornenburg-biber_violin_sonatas-09-biber_rosary_sonata_14_in_d_major_the_assumption_of_mary-204-233.mp3" "5/arthur_yoria-of_the_lovely-02-just_like_you-88-117.mp3" +"18968" "49995" "8884" "2" "8" "3" "6/paul_beier-michelagnolo_galilei-04-passemezzo_and_saltarello-233-262.mp3" "9/lisa_debenedictis-tigers-13-girl_and_supergirl-59-88.mp3" "9/musica_franca-corrette__le_phenix__les_delices_de_la_solitude-02-phenix__adagio-30-59.mp3" +"48750" "34898" "39821" "0" "0" "3" "f/philharmonia_baroque_orchestra-handel__atalanta_cd2-12-recitativo_sono_irene_oppur_sogno-0-29.mp3" "c/o_fickle_fortune-a_celebration_of_robert_burns-08-bonie_wee_thing-146-175.mp3" "c/hands_upon_black_earth-hands_upon_black_earth-09-lastime-30-59.mp3" +"22058" "43611" "31227" "1" "0" "4" "1/mijo-fata_morgana-05-deep_bass_9-146-175.mp3" "8/stargarden-music_for_modern_listening-10-p5_mon_amour-146-175.mp3" "b/shane_jackman-sanctuary-07-early_frost-175-204.mp3" +"33029" "448" "20213" "0" "3" "3" "1/zilla-egg-07-rufus-88-117.mp3" "b/satori-journey_to_other_worlds-01-astral_flight-813-842.mp3" "b/rob_costlow-sophomore_jinx-04-summer_garden-204-233.mp3" +"45936" "5881" "227" "1" "0" "1" "e/magnatune_compilation-high_energy_rock_and_roll-11-jackalopes_frankenstein_girl-59-88.mp3" "9/american_bach_soloists-heinrich_schutz__musicalische_exequien-02-musicalische_exequien_swv_280_teil_ii_motette_herr_wenn_ich_nur_dich_habe-59-88.mp3" "9/the_wretch-ambulatory-01-admission-30-59.mp3" +"2423" "13041" "52368" "0" "0" "1" "c/liquid_zen-oscilloscope-01-levier_darmement-204-233.mp3" "8/mercy_machine-in_your_bed__the_remixes-03-fountain_street_q_burns_abstract_message_mix-88-117.mp3" "b/altri_stromenti-uccellini-15-alla_roversa_uccellini-30-59.mp3" +"40226" "12032" "41470" "26" "11" "3" "d/drevo-christian_themes_in_ukrainian_folk_songs-09-o_did_you_learn_o_people_such_news-146-175.mp3" "e/dac_crowell-within_this_space-03-cantillation-639-668.mp3" "b/suzanne_teng-miles_beyond-09-tiens_lullaby-175-204.mp3" +"43720" "21806" "27879" "7" "11" "8" "5/arthur_yoria-suerte_something-10-places_everyone-59-88.mp3" "b/philharmonia_baroque-mozart_orchestral_works-05-concerto_for_flute_and_harp_in_c_major_kv299_allegro-175-204.mp3" "5/mrdc-plethora-06-lostcode-88-117.mp3" +"44689" "6034" "44918" "0" "0" "1" "7/rob_costlow-woods_of_chaos-10-twilight-88-117.mp3" "0/chris_harvey-the_white_sail-02-allegiance-146-175.mp3" "e/steven_devine-portrait_of_an_english_harpsichord-11-sonata_no_1_in_f_major_allegro_thomas_arne-0-29.mp3" +"8773" "7889" "53837" "0" "0" "1" 
"b/satori-journey_to_other_worlds-02-other_worlds-1596-1625.mp3" "5/rapoon-fallen_gods-02-iron_path-262-291.mp3" "2/duo_chambure-vihuela_duets_of_valderrabano-16-petite_camusete_adriano_willaert-30-59.mp3" +"15856" "42746" "528" "2" "1" "1" "a/jade_leary-and_come_the_sirens-03-tropics-88-117.mp3" "0/apa_ya-apa_ya-10-funky_funky-88-117.mp3" "8/mercy_machine-mercy_machine-01-bastard-30-59.mp3" +"58513" "20219" "14212" "1" "1" "0" "b/janine_johnson-bach_goldberg_variations-29-variatio_28_a_2_clav-0-29.mp3" "e/touchinggrace-submission-04-sunday_driver-88-117.mp3" "d/tim_rayborn-chordae-03-o_suavis-697-726.mp3" +"3477" "26214" "12908" "0" "2" "1" "2/ensemble_sreteniye___three_holies_church_choristers-dont_cry_rachael-01-psalm_128_at_the_time_of_wedding_byzantine_chant-30-59.mp3" "1/etherine-24_days-06-burn_down_the_pleasure-146-175.mp3" "5/rapoon-fallen_gods-03-fallen_gods-523-552.mp3" +"53857" "44180" "18594" "1" "26" "2" "d/daniel_ben_pienaar-book_2_cd1_welltempered_clavier-16-prelude_and_fugue_no__8_in_dsharp_minor_bwv_877_fuga-88-117.mp3" "1/william_brooks-karma_dogs-10-slacker_blues-204-233.mp3" "e/justin_bianco-forge-04-mischief-0-29.mp3" +"29958" "13748" "28479" "18" "8" "16" "d/paul_avgerinos-words_touch-06-weightless_you_are-610-639.mp3" "f/magnatune-relaxation_spa-03-march_thirtyone_falling_you-204-233.mp3" "a/liquid_zen-magic_midsummer-06-por_tus_ojos-88-117.mp3" +"33882" "57076" "9150" "0" "1" "3" "b/paul_beier-alessandro_piccinini-07-toccata_xvii-30-59.mp3" "d/daniel_ben_pienaar-book_2_cd1_welltempered_clavier-22-prelude_and_fugue_no__11_in_f_major_bwv_880_fuga-59-88.mp3" "a/bjorn_fogelberg-karooshi_porn-02-quite_derivative-175-204.mp3" +"34190" "8580" "49580" "7" "3" "9" "a/bjorn_fogelberg-karooshi_porn-07-wave-117-146.mp3" "4/dac_crowell-spctr-02-northeastern_corridor-610-639.mp3" "b/dj_cary-power_synths-13-atom__instrumental_mix_trancevision-30-59.mp3" +"32218" "9899" "10775" "35" "46" "67" "8/tim_rayborn-veils_of_light-07-maaz_zami-30-59.mp3" "5/edward_martin_and_paul_berget-baroque_lute_duets-02-sonata_in_c_major_weiss__allegro_1_and_2-204-233.mp3" "7/dac_crowell-the_sea_and_the_sky-02-umi_no_kami_ni_kansha-1480-1509.mp3" +"43201" "52271" "36572" "22" "30" "28" "a/bjorn_fogelberg-karooshi_porn-10-low_level_format-0-29.mp3" "1/etherine-24_days-14-what_if_it_happened_somewhere_else-59-88.mp3" "e/yongen-moonrise-08-nylon_heart-117-146.mp3" +"39653" "50052" "41805" "8" "6" "2" "6/dj_markitos-slower_emotions138_bpm_remixes-09-inside_your_dreams_138_bpm_remix-146-175.mp3" "6/tom_paul-i_was_king-13-i_wish_the_world_was_flat-59-88.mp3" "0/rocket_city_riot-last_of_the_pleasure_seekers-10-in_my_bones-30-59.mp3" +"30745" "28431" "2816" "11" "2" "9" "3/spinecar-autophile-07-cant_sleep-0-29.mp3" "5/four_stones_net-ridin_the_faders-06-part_6-88-117.mp3" "f/memories_of_tomorrow-waiting_for_dawn-01-memories_of_tomorrow-291-320.mp3" +"15538" "40035" "55703" "11" "2" "7" "5/processor-are_you_for_real-03-technojorgen-117-146.mp3" "a/jade_leary-the_lost_art_of_human_kindness-09-meaner_than_winter-146-175.mp3" "a/edward_martin_and_william_bastian-virtues_and_vices-19-eau_vive_source_damour_jacues_mauduit-175-204.mp3" +"42271" "3551" "17701" "2" "0" "0" "9/the_strap_ons-geeking_crime-10-chucky_got_lucky-30-59.mp3" "1/the_rajdhani_quartet-the_gandhi_memorial_concert-01-raga_hansa_kalyani-610-639.mp3" "5/satori-for_relaxation-04-forest_surround-117-146.mp3" +"49025" "21576" "34893" "1" "8" "1" "2/vito_paternoster-cd2bach_cello_suites-12-suite_iv_in_mi_bemolle_maggiore__gigue-88-117.mp3" 
"3/mandrake_root-the_seventh_mirror-05-build_o_burn-30-59.mp3" "c/o_fickle_fortune-a_celebration_of_robert_burns-08-bonie_wee_thing-0-29.mp3" +"19314" "8003" "42101" "2" "1" "0" "3/etherfysh-box_of_fysh-04-rama-0-29.mp3" "2/solace-ahsas-02-khatar_dawr_hindi_78-30-59.mp3" "b/altri_stromenti-uccellini-10-beatus_uccellini-59-88.mp3" +"28431" "48816" "32254" "1" "1" "2" "5/four_stones_net-ridin_the_faders-06-part_6-88-117.mp3" "9/the_seldon_plan-making_circles-12-samuel_p__huntington-175-204.mp3" "e/briddes_roune-lenten_is_come-07-man_mai_longe_lives_weene-59-88.mp3" +"53779" "13138" "2848" "48" "17" "12" "b/la_primavera-english_renaissance_music-16-martin_said_to_his_man_anon-59-88.mp3" "b/electric_frankenstein-dead_and_back-03-good_for_nothing-88-117.mp3" "4/norine_braun-modern_anguish-01-modern_anguish-204-233.mp3" +"11904" "31108" "9312" "3" "7" "5" "d/belief_systems-eproms-03-boiling-175-204.mp3" "5/human_response-survival-07-distance-262-291.mp3" "5/sitar-first_congregational_church_concert-02-raga_patdeep__gat__jay_kishor-1277-1306.mp3" +"2486" "4071" "56693" "6" "14" "20" "9/etherine-gleam-01-lost-262-291.mp3" "3/dj_cary-downtempo_chill-01-ruff_and_tumble_mr_epic-233-262.mp3" "9/american_baroque-dances_and_suites_of_rameau_and_couperin-21-les_fauvetes_plaintives_xiveme_ordre_couperin-146-175.mp3" +"23780" "11633" "3914" "1" "0" "1" "e/yongen-moonrise-05-one-204-233.mp3" "0/apa_ya-apa_ya-03-antare_e_dhu-175-204.mp3" "5/sitar-first_congregational_church_concert-01-raga_patdeep__alap__jay_kishor-1509-1538.mp3" +"52830" "5259" "22715" "1" "0" "1" "d/sarasa_ensemble_labelle_sylvan-bach_cantatas-15-liebster_jesu_mein_veriangen_aria-146-175.mp3" "b/ehren_starks-the_depths_of_a_year-01-the_tale_of_room_620-30-59.mp3" "5/domased-selection-05-highway_accident-146-175.mp3" +"47279" "42679" "5464" "2" "3" "4" "f/glen_bledsoe-octopants-11-und_so_weiter_i-0-29.mp3" "8/cargo_cult-vibrant-10-fifth-59-88.mp3" "a/bjorn_fogelberg-karooshi_porn-01-trioxidation-0-29.mp3" +"41134" "39938" "158" "0" "1" "1" "b/ehren_starks-the_depths_of_a_year-09-subtle_groove-30-59.mp3" "8/magnatune-red_hat_summit_compilation-09-lizzi__lay_down-117-146.mp3" "5/new_york_consort_of_viols-dances_and_canzonas_of_holborne_and_brade-01-3_almaines_holborne-610-639.mp3" +"25176" "10426" "33454" "2" "1" "1" "6/dr_kuch-analog_disease-05-the_persuaders_dub-117-146.mp3" "4/paul_avgerinos-sky_of_grace-02-the_calling-117-146.mp3" "a/falling_you-human-07-starshine-378-407.mp3" +"28835" "13286" "17708" "2" "0" "0" "9/self_delusion-happiness_hurts_me-06-secret_life-175-204.mp3" "3/sherefe-opium-03-huzam_kamelieriko-59-88.mp3" "1/jeff_wahl-guinevere-04-freedom-30-59.mp3" +"10900" "51861" "49373" "6" "1" "0" "5/paul_beier-simone_molinaro-02-ung_gaij_bergier__canzone_francese_a_quattro_di_thomas_crequillon-146-175.mp3" "7/jeni_melia-the_last_of_old_england-14-she_moved_through_the_fair_trad-117-146.mp3" "e/briddes_roune-lenten_is_come-12-worldes_blis-88-117.mp3" +"26678" "21897" "19853" "0" "0" "2" "e/hans_christian-phantoms-06-desperado-204-233.mp3" "6/curandero-curandero-05-corriendo_juntos-117-146.mp3" "1/phebe_craig_and_katherine_westine-beside_themselves-04-soler_conceierto_ii_in_a_minor_andante-175-204.mp3" +"20042" "1973" "24908" "22" "110" "21" "3/les_filles_de_sainte_colombe-german_music_for_viols_and_harpsichord-04-sonata_xi_in_g_major_schenck-291-320.mp3" "7/paul_avgerinos-phos_hilaron-01-humbly_i_adore_thee-465-494.mp3" "2/vito_paternoster-cd2bach_cello_suites-05-suite_iii_in_do_maggiore__bourree_i_e_ii-88-117.mp3" +"50057" "18157" 
"44551" "11" "8" "1" "6/tom_paul-i_was_king-13-i_wish_the_world_was_flat-204-233.mp3" "6/mercy_machine-in_your_bed-04-invisible-146-175.mp3" "a/jade_leary-the_lost_art_of_human_kindness-10-the_lost_art_of_human_kindness-233-262.mp3" +"18157" "50057" "44551" "6" "8" "4" "6/mercy_machine-in_your_bed-04-invisible-146-175.mp3" "6/tom_paul-i_was_king-13-i_wish_the_world_was_flat-204-233.mp3" "a/jade_leary-the_lost_art_of_human_kindness-10-the_lost_art_of_human_kindness-233-262.mp3" +"35229" "8767" "45934" "0" "0" "2" "4/american_bach_soloists-j_s__bach__mass_in_b_minor_cd1-08-domine_deus-175-204.mp3" "b/satori-journey_to_other_worlds-02-other_worlds-1422-1451.mp3" "e/magnatune_compilation-high_energy_rock_and_roll-11-jackalopes_frankenstein_girl-0-29.mp3" +"33478" "43612" "15969" "1" "1" "0" "5/stargarden-step_off-07-step_out_to_the_stars-581-610.mp3" "8/stargarden-music_for_modern_listening-10-p5_mon_amour-175-204.mp3" "1/zilla-egg-03-velvet_periscope-233-262.mp3" +"47739" "55783" "36335" "2" "35" "6" "b/magnatune_compilation-rock-12-cargo_cult_our_song-30-59.mp3" "9/american_baroque-dances_and_suites_of_rameau_and_couperin-19-le_rossignol_en_amour_xiveme_ordre_couperin-0-29.mp3" "7/roots_of_rebellion-the_looking_glass-08-messenger-175-204.mp3" +"38419" "58376" "21451" "3" "1" "2" "e/burning_babylon-stereo_mash_up-09-1500_tons-0-29.mp3" "c/magnatune-classical-28-jan_lathamkoenig_franck_psyche_enlevee_par_les_zephirs-146-175.mp3" "b/rob_costlow-sophomore_jinx-05-bliss-59-88.mp3" +"21321" "44532" "39220" "0" "0" "2" "e/hans_christian-phantoms-05-atlantis-59-88.mp3" "6/falik-streaks_and_strokes-10-the_last_faery-0-29.mp3" "8/beth_quist-lucidity-09-ferte_preza_na_prezarro-146-175.mp3" +"4847" "10846" "32069" "2" "26" "5" "8/hybris-the_first_words-01-squalid_rooms-59-88.mp3" "5/thursday_group-uncle_mean-02-uncle_mean-233-262.mp3" "2/mrdc-timecode-07-leaving-88-117.mp3" +"24295" "22528" "31104" "2" "1" "0" "9/the_kokoon-berlin-05-scout-117-146.mp3" "6/philharmonia_baroque-rameau_and_leclair-05-gavottes__air_pour_les_pagodes__contredanse_en_roundeau_rameau_les_paladins-88-117.mp3" "5/human_response-survival-07-distance-146-175.mp3" +"10900" "49373" "51861" "3" "2" "0" "5/paul_beier-simone_molinaro-02-ung_gaij_bergier__canzone_francese_a_quattro_di_thomas_crequillon-146-175.mp3" "e/briddes_roune-lenten_is_come-12-worldes_blis-88-117.mp3" "7/jeni_melia-the_last_of_old_england-14-she_moved_through_the_fair_trad-117-146.mp3" +"28315" "5002" "55324" "0" "3" "6" "f/norine_braun-and_the_mood_swings-06-one_breath-146-175.mp3" "4/dj_markitos-evolution_of_the_mind-01-sunset_endless_night_journey_remix-30-59.mp3" "f/philharmonia_baroque_orchestra-handel__atalanta_cd2-18-recitativo_lo_vo_morir___gia_corro___-0-29.mp3" +"11535" "10387" "34308" "1" "1" "1" "8/beth_quist-lucidity-03-american_giver-0-29.mp3" "f/hans_christian-surrender-02-tears_for_ma-262-291.mp3" "4/seismic_anamoly-sweet_rock_candy-07-wreckinball-436-465.mp3" +"4857" "2057" "1770" "0" "2" "0" "e/mountain_mirrors-mountain_mirrors-01-stay_evil-59-88.mp3" "f/thursday_group-first_album-01-innocent_murmur-146-175.mp3" "8/sun_palace-give_me_a_perfect_world-01-give_me_a_perfect_world-59-88.mp3" +"19601" "37546" "8952" "1" "0" "0" "9/suzanne_teng-enchanted_wind-04-septembers_angels-262-291.mp3" "5/dj_markitos-unreachable_destiny-08-strange_sensations-59-88.mp3" "0/the_bots-truth-02-power_and_domination-30-59.mp3" +"56581" "37228" "18853" "7" "12" "3" "0/ensemble_vermillian-stolen_jewels-21-buxtehude_sonata_iii_op__i_prestoadagio-59-88.mp3" 
"a/yongen-yello_haus-08-shackles_of_gold-175-204.mp3" "0/william_brooks-bitter_circus-04-ole_soledad-0-29.mp3" +"44579" "23901" "4333" "0" "0" "1" "e/john_jackson-bad_things_happen_all_the_time-10-through_the_glass-30-59.mp3" "5/thursday_group-uncle_mean-05-pelican_fan-349-378.mp3" "2/magnatune_remixed-ridin_the_faders_2-01-shining_star_games_az_egan_csillag_lenni__fourstones-30-59.mp3" +"27702" "10388" "37832" "2" "0" "0" "5/ralph_rousseau_meulenbroeks-moved_by_marais-06-le_folies_despagnes_le_deuxieme_livre_de_pieces_de_viole_1701-639-668.mp3" "f/hans_christian-surrender-02-tears_for_ma-291-320.mp3" "f/memories_of_tomorrow-waiting_for_dawn-08-the_games_you_play_trance_mix-30-59.mp3" +"36147" "50477" "15309" "0" "2" "0" "7/artemis-gravity-08-la_belle-88-117.mp3" "0/american_bach_soloists-j_s__bach__transcriptions_of_italian_music-13-psalm_51_tilge_hochster_meine_sunden_x_andante_verses_11_through_15-291-320.mp3" "b/belief_systems-eponyms-03-starbase_lounge_music-146-175.mp3" +"1964" "8159" "4605" "1" "0" "1" "7/paul_avgerinos-phos_hilaron-01-humbly_i_adore_thee-204-233.mp3" "0/jag-juke_joint_boogie-02-lay_down_your_plow-0-29.mp3" "1/tanya_tomkins_and_eric_zivian-beethoven_piano_and_cello_works-01-sonata_in_g_minor_op__5_no__2_adagio___allegro-204-233.mp3" +"45459" "15092" "44652" "4" "5" "22" "e/joram-moments_of_clarity-11-diemos-146-175.mp3" "2/version-versions-03-solaire-320-349.mp3" "c/telemann_trio_berlin-telemann_trio_berlin-10-triosonate_in_ddur__allegro_c_p__bach-233-262.mp3" +"24525" "58630" "9935" "13" "3" "2" "9/arthur_yoria-ill_be_here_awake-05-sleep_is_on_the_way-117-146.mp3" "b/janine_johnson-bach_goldberg_variations-31-variatio_30_a_1_clav_quodlibet-30-59.mp3" "4/seth_carlin-schubert__works_for_solo_fortepiano-02-sonata_in_eflat_major_opus_posth__122__andante_molto-0-29.mp3" +"46909" "48220" "649" "15" "73" "22" "1/solace-the_gathering_season-11-sudan-146-175.mp3" "f/strojovna_07-switch_on__switch_off-12-jala__jala-204-233.mp3" "c/jay_kishor-the_payans_concert-01-bhimpalasi-1770-1799.mp3" +"9069" "8394" "19206" "1" "1" "1" "a/ensemble_sreteniye-ancient_church_singing_of_byzantine_georgia_and_rus-02-psalm_146_and_o_thou_the_only_begotten_son-59-88.mp3" "9/various_artists-the_2007_magnatune_records_sampler-02-moonrise_yongen-59-88.mp3" "3/mandrake_root-the_seventh_mirror-04-put_your_money_where_your_mouth_is-0-29.mp3" +"5881" "45936" "227" "0" "1" "0" "9/american_bach_soloists-heinrich_schutz__musicalische_exequien-02-musicalische_exequien_swv_280_teil_ii_motette_herr_wenn_ich_nur_dich_habe-59-88.mp3" "e/magnatune_compilation-high_energy_rock_and_roll-11-jackalopes_frankenstein_girl-59-88.mp3" "9/the_wretch-ambulatory-01-admission-30-59.mp3" +"20536" "46254" "9938" "1" "0" "0" "e/magnatune_com-magnatune_at_the_cc_salon-04-tranquilo_curandero-146-175.mp3" "f/kyiv_chamber_choir-masterpieces_of_the_ukrainian_choral_baroque-11-o_lord_thou_are_my_only_hope-378-407.mp3" "4/seth_carlin-schubert__works_for_solo_fortepiano-02-sonata_in_eflat_major_opus_posth__122__andante_molto-88-117.mp3" +"46254" "20536" "9938" "0" "2" "0" "f/kyiv_chamber_choir-masterpieces_of_the_ukrainian_choral_baroque-11-o_lord_thou_are_my_only_hope-378-407.mp3" "e/magnatune_com-magnatune_at_the_cc_salon-04-tranquilo_curandero-146-175.mp3" "4/seth_carlin-schubert__works_for_solo_fortepiano-02-sonata_in_eflat_major_opus_posth__122__andante_molto-88-117.mp3" +"55246" "27468" "56867" "1" "3" "1" "3/jacob_heringman-siena_lute_book-18-la_volunte_sandrin-117-146.mp3" 
"2/magnatune_compilation-electronica-06-indidginus_dusty_lands-262-291.mp3" "b/janine_johnson-bach_goldberg_variations-21-variatio_20_a_2_clav-88-117.mp3" +"8666" "45584" "5511" "2" "0" "0" "d/drevo-christian_themes_in_ukrainian_folk_songs-02-oh_uncle_in_your_yard-146-175.mp3" "0/rocket_city_riot-middle_age_suicide-11-everyday_like_saturday_bonus_track-30-59.mp3" "3/emmas_mini-beat_generation_mad_trick-01-unknown-146-175.mp3" +"41781" "45713" "25493" "5" "22" "2" "7/monoide-zeitpunkt-09-zeitpunkt-204-233.mp3" "7/beight-file_in_rhythm-11-ghostly_feeling-88-117.mp3" "6/drop_trio-cezanne-05-wreck_of_the_zephyr-407-436.mp3" +"35493" "27453" "44191" "5" "15" "1" "d/beth_quist-shall_we_dance-08-finale-320-349.mp3" "7/artemis-gravity-06-inception-146-175.mp3" "5/stargarden-the_art_of_analog_diversion-10-smooth-88-117.mp3" +"49311" "21556" "49514" "1" "0" "0" "a/janine_johnson-chopin_recital-12-valse_brilliante_op_18_in_eb_major-146-175.mp3" "a/james_edwards-le_tresor_dorphee_by_antoine_francisque-05-branle_simple_a_cordes_avalees_troisiesme-30-59.mp3" "1/tim_rayborn-qadim-13-amniat__robab_afghan-59-88.mp3" +"43509" "19374" "5396" "11" "7" "11" "7/claire_fitch-ambiencellist_part_ii-10-oceanic_memories-0-29.mp3" "3/dj_cary-downtempo_chill-04-reentry_cargo_cult-117-146.mp3" "7/jag-cypress_grove_blues-01-train_train-59-88.mp3" +"20288" "37692" "8922" "2" "2" "4" "9/introspekt-ideology-04-tbd-262-291.mp3" "7/solace-rhythm_of_the_dance-08-sword_dance_routine-204-233.mp3" "f/lisa_debenedictis-fruitless-02-planet_you-88-117.mp3" +"58843" "8725" "37140" "0" "2" "0" "8/jacob_heringman-blame_not_my_lute-49-grene_sleves-0-29.mp3" "b/satori-journey_to_other_worlds-02-other_worlds-204-233.mp3" "0/american_bach_soloists-joseph_haydn__masses-08-sanctus__adagio__allegro-117-146.mp3" +"38721" "45114" "7927" "0" "0" "3" "6/sand_craig_and_dornenburg-biber_violin_sonatas-09-biber_rosary_sonata_14_in_d_major_the_assumption_of_mary-204-233.mp3" "4/jami_sieber-hidden_sky-11-arms_of_the_mother-117-146.mp3" "5/arthur_yoria-of_the_lovely-02-just_like_you-88-117.mp3" +"49441" "41854" "12336" "1" "1" "0" "f/the_headroom_project-jetuton_andawai-13-rainmaker-59-88.mp3" "1/zilla-egg-10-a_bakers_dozen-30-59.mp3" "c/domased-new_memories-03-crazy_days-291-320.mp3" +"15755" "40200" "27167" "0" "1" "1" "2/ensemble_sreteniye___three_holies_church_choristers-dont_cry_rachael-03-to_thee_o_theotokos_bulgarian_chant-349-378.mp3" "f/memories_of_tomorrow-waiting_for_dawn-09-not_like_this-262-291.mp3" "a/jade_leary-and_come_the_sirens-06-forgotten-117-146.mp3" +"46456" "37064" "27316" "6" "18" "5" "0/beth_quist-silver-11-planet-291-320.mp3" "1/tim_rayborn-ashek-08-ravan-639-668.mp3" "a/mr_gelatine-electroluv-06-hiphop110-30-59.mp3" +"17881" "41492" "45650" "0" "1" "2" "0/beth_quist-silver-04-grace-204-233.mp3" "6/jeni_melia-the_lost_art_of_wooing-09-toccata_no_6_in_f_major_hieronymus_kapsberger-88-117.mp3" "e/solace-balance-11-feitian-175-204.mp3" +"18135" "34876" "10696" "1" "0" "1" "6/mercy_machine-in_your_bed__instrumental_mix-04-invisible_instrumental_mix-117-146.mp3" "4/monks_and_choirs_of_kiev_pechersk_lavra-chants_of_the_russian_orthodox_church-08-bless_is_the_man_authentic_kiev_chant-88-117.mp3" "c/telemann_trio_berlin-telemann_trio_berlin-02-triosonate_fur_flute_violine_und_basso__allegro_telemann-0-29.mp3" +"34360" "46312" "6826" "0" "0" "1" "6/ralph_meulenbroeks-gambomania-08-cors_de_chasse_in_d_major_louis_de_caix_dhervelois-117-146.mp3" 
"a/ensemble_sreteniye-ancient_church_singing_of_byzantine_georgia_and_rus-11-o_theotokos_the_virgin_rejoice_ave_maria-1451-1480.mp3" "f/strojovna_07-switch_on__switch_off-02-deluxe-175-204.mp3" +"36438" "1802" "55144" "0" "0" "1" "5/arthur_yoria-of_the_lovely-08-my_best_routines-88-117.mp3" "f/jackalopes-jacksploitation-01-good_clean_fun_laundromat-30-59.mp3" "1/jacob_heringman-holburns_passion-18-fantasia_lute-0-29.mp3" +"2968" "33712" "27443" "0" "0" "2" "d/paul_avgerinos-balancing_spheres-01-night_illusions__the_longing-610-639.mp3" "9/cheryl_ann_fulton-the_airs_of_wales-07-the_dimpled_cheek-0-29.mp3" "5/mr_epic-sideways-06-in-59-88.mp3" +"36696" "14385" "32403" "14" "14" "6" "7/paul_avgerinos-phos_hilaron-08-oneness-204-233.mp3" "3/very_large_array-stuff-03-perfect_place-88-117.mp3" "7/falik-elvolution-07-my_beloved-233-262.mp3" +"3858" "45324" "13497" "17" "16" "5" "9/sitar-cd1_the_sowebo_concert-01-raga_maru_bihag_alapjay_kishor-2089-2118.mp3" "5/burnshee_thornside-rock_this_moon-11-city_girls-30-59.mp3" "6/electric_frankenstein-conquers_the_world-03-just_like_your_mom-0-29.mp3" +"15538" "55703" "40035" "10" "3" "3" "5/processor-are_you_for_real-03-technojorgen-117-146.mp3" "a/edward_martin_and_william_bastian-virtues_and_vices-19-eau_vive_source_damour_jacues_mauduit-175-204.mp3" "a/jade_leary-the_lost_art_of_human_kindness-09-meaner_than_winter-146-175.mp3" +"10850" "36664" "12896" "1" "2" "0" "7/shane_jackman-equilibrium-02-uncommon_eloquence-0-29.mp3" "8/mediva-viva_mediva-08-ondas_da_mar_codax-175-204.mp3" "5/rapoon-fallen_gods-03-fallen_gods-175-204.mp3" +"41473" "10497" "25993" "1" "0" "1" "b/suzanne_teng-miles_beyond-09-tiens_lullaby-262-291.mp3" "5/cheryl_ann_fulton-the_once_and_future_harp-02-the_poet-349-378.mp3" "8/kenji_williams-worldspirit_soundtrack-06-aura-0-29.mp3" +"50678" "43820" "28144" "8" "10" "9" "c/vito_paternoster-cd1bach_sonatas_and_partitas_for_solo_violin-13-sonata_seconda_in_re_minore__grave-204-233.mp3" "6/grayson_wray-picassos_dream-10-pure_delight-117-146.mp3" "d/paul_avgerinos-maya__the_great_katun-06-night_of_the_goddess_part_2-378-407.mp3" +"40744" "50268" "21948" "1" "0" "1" "f/ivilion-sartinal-09-sartinal_iii-291-320.mp3" "9/junghae_kim-the_virginalists-13-my_ladye_nevells_ground_william_byrd-291-320.mp3" "b/ehren_starks-the_depths_of_a_year-05-dads_song-175-204.mp3" +"19314" "42101" "8003" "1" "0" "0" "3/etherfysh-box_of_fysh-04-rama-0-29.mp3" "b/altri_stromenti-uccellini-10-beatus_uccellini-59-88.mp3" "2/solace-ahsas-02-khatar_dawr_hindi_78-30-59.mp3" +"44142" "50101" "30302" "0" "0" "2" "c/mountain_mirrors-lunar_ecstasy-10-sidewinder-0-29.mp3" "4/justin_bianco-siren-13-incantation-0-29.mp3" "3/sherefe-opium-07-alf_leyla_wa_leyla-88-117.mp3" +"37696" "17012" "29056" "16" "3" "4" "7/solace-rhythm_of_the_dance-08-sword_dance_routine-320-349.mp3" "1/ambient_teknology-phoenix-04-cloud_maker-465-494.mp3" "6/falik-streaks_and_strokes-06-son_of_sand_flea-291-320.mp3" +"37795" "11263" "13068" "7" "41" "29" "8/hybris-the_first_words-08-the_choice_i_never_had-117-146.mp3" "0/american_bach_soloists-j_s__bach__cantatas_volume_v-03-gleichwie_der_regen_und_schnee_vom_himmel_fallt_bwv_18_iii_recitative_and_litany__mein_gott_hier_wird_mein_herze_sein-59-88.mp3" "b/dj_cary-power_synths-03-from_love_2_wicked_allstars-204-233.mp3" +"8688" "19207" "2086" "4" "2" "24" "3/mandrake_root-the_seventh_mirror-02-one_in_a_million-88-117.mp3" "3/mandrake_root-the_seventh_mirror-04-put_your_money_where_your_mouth_is-30-59.mp3" 
"c/dj_markitos-inside_your_dreams-01-interplanetary_travel-233-262.mp3" +"8426" "58323" "26605" "1" "0" "1" "1/dac_crowell-sferica-02-murata-697-726.mp3" "b/jacob_heringman-jane_pickeringes_lute_book-28-a_fantasia-88-117.mp3" "3/kitka-the_vine-06-de_szeretnek_az_egen_csillag_lenni_hungary-146-175.mp3" +"23903" "33304" "38465" "0" "2" "0" "5/thursday_group-uncle_mean-05-pelican_fan-407-436.mp3" "4/seth_carlin-schubert__works_for_solo_fortepiano-07-sonata_in_a_major_opus_posth__120__allegro-320-349.mp3" "1/ambient_teknology-phoenix-09-afterburner-146-175.mp3" +"813" "12655" "175" "0" "0" "2" "9/wicked_boy-chemistry-01-buy_my_pullovers-30-59.mp3" "6/tom_paul-i_was_king-03-drops_that_hit_the_sand-204-233.mp3" "e/burning_babylon-stereo_mash_up-01-7_nine_skank-0-29.mp3" +"57076" "33882" "9150" "0" "0" "2" "d/daniel_ben_pienaar-book_2_cd1_welltempered_clavier-22-prelude_and_fugue_no__11_in_f_major_bwv_880_fuga-59-88.mp3" "b/paul_beier-alessandro_piccinini-07-toccata_xvii-30-59.mp3" "a/bjorn_fogelberg-karooshi_porn-02-quite_derivative-175-204.mp3" +"28431" "32254" "48816" "1" "2" "0" "5/four_stones_net-ridin_the_faders-06-part_6-88-117.mp3" "e/briddes_roune-lenten_is_come-07-man_mai_longe_lives_weene-59-88.mp3" "9/the_seldon_plan-making_circles-12-samuel_p__huntington-175-204.mp3" +"29530" "27885" "40631" "9" "36" "8" "a/edward_martin_and_william_bastian-virtues_and_vices-06-tant_que_vivray_claudin_de_semisy-0-29.mp3" "5/mrdc-plethora-06-lostcode-262-291.mp3" "4/paul_berget-the_siena_manuscript_on_renaissance_lute-09-recercar_9-0-29.mp3" +"32332" "10810" "34856" "2" "2" "7" "f/tilopa-turkishauch-07-meifrui_ii-320-349.mp3" "7/dac_crowell-the_sea_and_the_sky-02-umi_no_kami_ni_kansha-2495-2524.mp3" "b/magnatune_compilation-rock-08-beth_quist_monsters-59-88.mp3" +"36438" "55144" "1802" "4" "34" "7" "5/arthur_yoria-of_the_lovely-08-my_best_routines-88-117.mp3" "1/jacob_heringman-holburns_passion-18-fantasia_lute-0-29.mp3" "f/jackalopes-jacksploitation-01-good_clean_fun_laundromat-30-59.mp3" +"40913" "54289" "37827" "0" "0" "2" "4/ensemble_mirable-triemer_six_cello_sonatas-09-sonata_2_minuetto_i-88-117.mp3" "a/jacob_heringman-black_cow-17-bakfark_fantasia-30-59.mp3" "d/various_artists-the_art_of_persuasion-08-the_fate_cargo_cult-117-146.mp3" +"36624" "13363" "40228" "3" "5" "16" "1/four_stones-chronic_dreams-08-ocean_in_her_head_spliff_mix-146-175.mp3" "f/kenji_williams-faces_of_epiphany-03-illusion_bedrock_mix-378-407.mp3" "f/jacob_heringman_and_catherine_king-alonso_mudarra_songs_and_solos-09-o_gelosia_de_amanti-0-29.mp3" +"9816" "43747" "5473" "1" "0" "0" "8/justin_bianco-phoenix-02-so_softly-88-117.mp3" "2/saros-soundscapes-10-prelude_3-0-29.mp3" "a/bjorn_fogelberg-karooshi_porn-01-trioxidation-262-291.mp3" +"2578" "54627" "52175" "59" "4" "7" "f/brad_sucks-i_dont_know_what_im_doing-01-making_me_nervous-0-29.mp3" "0/daniel_ben_pienaar-book_2_cd2_welltempered_clavier-17-prelude_and_fugue_no__21_in_bflat_major_bwv_890_praeludium-59-88.mp3" "9/cheryl_ann_fulton-the_airs_of_wales-14-the_welsh_ground-0-29.mp3" +"27347" "1757" "41280" "3" "1" "1" "f/ivilion-sartinal-06-hshante-233-262.mp3" "8/tim_rayborn-veils_of_light-01-gilim-0-29.mp3" "6/curandero-curandero-09-teddy_bear-30-59.mp3" +"48348" "17159" "27309" "1" "1" "0" "3/kitka-the_vine-12-mershkvaris_simghera_rimtitairi_georgia-59-88.mp3" "2/magnatune_remixed-ridin_the_faders_2-04-curl_my_cranium__lo_tag_blanco_and_fourstones-30-59.mp3" "c/william_brooks-fowl_mouth-06-hideaway-30-59.mp3" +"17483" "44596" "53909" "15" "18" "20" 
"5/dj_markitos-unreachable_destiny-04-element_of_truth-0-29.mp3" "e/solace-balance-10-tiger_moon_dance-146-175.mp3" "f/philharmonia_baroque_orchestra-handel__atalanta_cd2-16-recitativo_oh_del_crudo_mio_bene-30-59.mp3" +"8781" "38170" "31387" "3" "5" "5" "c/rapoon-easterly_6_or_7-02-our_tresspasses-175-204.mp3" "5/cheryl_ann_fulton-the_once_and_future_harp-08-virgin_light-0-29.mp3" "7/aba_structure-epic-07-erased-755-784.mp3" +"14627" "17471" "28952" "2" "2" "1" "f/dac_crowell-redshifted_harmonies-03-redshifted_harmonies-59-88.mp3" "b/belief_systems-eponyms-04-electro_kinetic-30-59.mp3" "3/etherfysh-box_of_fysh-06-sidewalk-320-349.mp3" +"9386" "31786" "52858" "2" "1" "0" "0/paul_berget-the_siena_manuscript_on_steel_string_guitar-02-recercar_2_steel_string_guitar-0-29.mp3" "e/c_layne-the_sun_will_come_out_to_blind_you-07-how_soon_i_forget-117-146.mp3" "5/kammen___swan-wild_wood-15-lullaby_set-204-233.mp3" +"21752" "30974" "6717" "1" "1" "0" "5/stargarden-step_off-05-china_green-494-523.mp3" "1/phebe_craig_and_katherine_westine-beside_themselves-07-couperin_musetes_de_choisi_et_de_taverni-146-175.mp3" "e/hans_christian-phantoms-02-coyotes_dance-117-146.mp3" +"9356" "4316" "46294" "0" "0" "1" "1/the_rajdhani_quartet-the_gandhi_memorial_concert-02-ragas_sindhu_bhairavi_and_gurjari_todi-668-697.mp3" "b/magnatune_compilation-rock-01-shane_jackman_didnt_i-88-117.mp3" "a/ensemble_sreteniye-ancient_church_singing_of_byzantine_georgia_and_rus-11-o_theotokos_the_virgin_rejoice_ave_maria-929-958.mp3" +"48043" "36352" "34380" "0" "1" "1" "a/da_camera-a_celtic_celebration-12-gavotta_summer__the_poppy_from_airs_for_the_seasons_osward-0-29.mp3" "e/solace-balance-08-miss_anime-30-59.mp3" "c/lvx_nova-lvx_nova-08-in_memory_of_magick_mick-407-436.mp3" +"37832" "22067" "7106" "1" "7" "2" "f/memories_of_tomorrow-waiting_for_dawn-08-the_games_you_play_trance_mix-30-59.mp3" "2/stellamara-star_of_the_sea-05-del_mar_rojo-30-59.mp3" "b/the_bots-now_is_the_time-02-electronic_paradise-30-59.mp3" +"7445" "55108" "4143" "0" "1" "0" "7/artemis-gravity-02-fountain_of_life-59-88.mp3" "a/james_edwards-le_tresor_dorphee_by_antoine_francisque-18-courante-0-29.mp3" "6/norine_braun-crow-01-salem-117-146.mp3" +"52931" "58140" "37834" "6" "8" "38" "d/janine_johnson-telemann_harpsichord_solos_from_der_getreue_music_meister-15-ouverture_burlesque_ouverture_a_la_polonaise_telemann-117-146.mp3" "b/hanneke_van_proosdij-harpsichord_suites_of_chambonnieres-26-suite_in_f_major__allemande-146-175.mp3" "f/memories_of_tomorrow-waiting_for_dawn-08-the_games_you_play_trance_mix-88-117.mp3" +"36219" "55292" "54503" "1" "0" "0" "2/drop_trio-big_dipper-08-leftys_alone-117-146.mp3" "0/daniel_ben_pienaar-book_2_cd2_welltempered_clavier-18-prelude_and_fugue_no__21_in_bflat_major_bwv_890_fuga-30-59.mp3" "9/the_strap_ons-geeking_crime-17-k_christ-30-59.mp3" +"41961" "33920" "27507" "0" "1" "0" "6/philharmonia_baroque-rameau_and_leclair-10-airs_de_demons_i__ii__iii__leclair_scylla_et_glaucus-30-59.mp3" "3/les_filles_de_sainte_colombe-german_music_for_viols_and_harpsichord-07-tombeau_de_m__blancheroche_froberger-233-262.mp3" "4/trip_wamsley-its_better_this_way-06-its_here_now-59-88.mp3" +"42175" "36300" "49336" "0" "4" "0" "c/reza_manzoori-restrung-10-breath-30-59.mp3" "6/dj_markitos-slower_emotions138_bpm_remixes-08-love_peace_and_ecstasy_138_bpm_remix-262-291.mp3" "e/justin_bianco-forge-12-war-146-175.mp3" +"3490" "904" "52002" "1" "1" "0" "b/the_bots-now_is_the_time-01-push-117-146.mp3" 
"a/da_camera-a_celtic_celebration-01-carolans_concerto_carolan_five_tunes_by_the_irish_harper-59-88.mp3" "3/c__layne-potemkin_villages-14-stiltz-59-88.mp3" +"7445" "43037" "6352" "21" "5" "2" "7/artemis-gravity-02-fountain_of_life-59-88.mp3" "4/rapoon-the_kirghiz_light__cd_1-10-jacobs_drum-88-117.mp3" "d/shiva_in_exile-ethnic-02-breathing-30-59.mp3" +"54000" "23729" "46403" "2" "0" "0" "6/doc_rossi-demarzi6_sonatas_for_cetra_o_kitara-16-sonata_iv_minuet-88-117.mp3" "5/dj_markitos-unreachable_destiny-05-obsession_forever-146-175.mp3" "5/four_stones_net-ridin_the_faders-11-part_11-117-146.mp3" +"17483" "45869" "2544" "2" "1" "2" "5/dj_markitos-unreachable_destiny-04-element_of_truth-0-29.mp3" "7/rapoon-what_do_you_suppose-11-i_dont_expect_anyone-407-436.mp3" "4/jami_sieber-hidden_sky-01-maenam-175-204.mp3" +"4873" "48605" "29251" "0" "2" "0" "d/various_artists-the_art_of_persuasion-01-subterranean_artemis-30-59.mp3" "0/jag-juke_joint_boogie-12-pearline-59-88.mp3" "7/paul_avgerinos-phos_hilaron-06-song_of_the_gladness-320-349.mp3" +"22072" "14875" "6281" "8" "29" "7" "2/stellamara-star_of_the_sea-05-del_mar_rojo-175-204.mp3" "0/william_brooks-bitter_circus-03-seven_promises-117-146.mp3" "5/mr_epic-sideways-02-blue_days-117-146.mp3" +"39434" "43074" "53909" "1" "11" "27" "a/plunkett-14_days-09-hold_tight-117-146.mp3" "4/justin_bianco-siren-10-journeys_twilight-30-59.mp3" "f/philharmonia_baroque_orchestra-handel__atalanta_cd2-16-recitativo_oh_del_crudo_mio_bene-30-59.mp3" +"45535" "2139" "49609" "0" "2" "0" "c/touchinggrace-happenstance-11-eastern_sky-262-291.mp3" "6/norine_braun-now_and_zen-01-jade-117-146.mp3" "a/jacob_heringman-black_cow-13-bakfark_non_accedat_ad_te_malum_secunda_pars-0-29.mp3" +"10846" "4847" "32069" "21" "3" "4" "5/thursday_group-uncle_mean-02-uncle_mean-233-262.mp3" "8/hybris-the_first_words-01-squalid_rooms-59-88.mp3" "2/mrdc-timecode-07-leaving-88-117.mp3" +"43602" "13184" "10913" "2" "0" "0" "6/strojovna_07-mirnix-10-out_clipping-175-204.mp3" "9/strojovna_07-iii-03-gvgh-175-204.mp3" "2/version-versions-02-universal_humans-262-291.mp3" +"8773" "41690" "35753" "1" "0" "0" "b/satori-journey_to_other_worlds-02-other_worlds-1596-1625.mp3" "7/falik-elvolution-09-what_not_things_seem_are_they-0-29.mp3" "4/dj_cary-downtempo_chill_2-08-huan_ying_mr_gelatine-30-59.mp3" +"31786" "9386" "52858" "3" "0" "0" "e/c_layne-the_sun_will_come_out_to_blind_you-07-how_soon_i_forget-117-146.mp3" "0/paul_berget-the_siena_manuscript_on_steel_string_guitar-02-recercar_2_steel_string_guitar-0-29.mp3" "5/kammen___swan-wild_wood-15-lullaby_set-204-233.mp3" +"40435" "30445" "5853" "3" "3" "10" "9/the_kokoon-berlin-09-passion_of_life-175-204.mp3" "0/ammonite-reconnection-07-angel_hold_on___-262-291.mp3" "6/ralph_meulenbroeks-gambomania-02-arabesque_martin_marais-117-146.mp3" +"36715" "56523" "23576" "4" "2" "7" "4/rapoon-the_kirghiz_light__cd_1-08-ora-233-262.mp3" "3/jag-four_strings-20-your_daddy_dont_know-59-88.mp3" "0/the_bots-truth-05-never_fall_in_love-146-175.mp3" +"29530" "40631" "27885" "7" "9" "35" "a/edward_martin_and_william_bastian-virtues_and_vices-06-tant_que_vivray_claudin_de_semisy-0-29.mp3" "4/paul_berget-the_siena_manuscript_on_renaissance_lute-09-recercar_9-0-29.mp3" "5/mrdc-plethora-06-lostcode-262-291.mp3" +"40036" "19350" "9362" "0" "1" "1" "a/jade_leary-the_lost_art_of_human_kindness-09-meaner_than_winter-175-204.mp3" "3/etherfysh-box_of_fysh-04-rama-1045-1074.mp3" "1/the_rajdhani_quartet-the_gandhi_memorial_concert-02-ragas_sindhu_bhairavi_and_gurjari_todi-842-871.mp3" 
+"50537" "37980" "40851" "0" "1" "2" "f/norine_braun-and_the_mood_swings-13-running_on_the_edge-204-233.mp3" "f/satori-healing_sounds_of_tibet-08-tibetan_trance-59-88.mp3" "6/hoxman-synthesis_of_five-09-snake_eye-0-29.mp3" +"54881" "29638" "54283" "1" "2" "1" "6/doc_rossi-demarzi6_sonatas_for_cetra_o_kitara-17-the_orange_rogue_trad__arr__rossi-117-146.mp3" "b/liquid_zen-elements_at_loop_10-06-the_surface-0-29.mp3" "e/philharmonia_baroque_orchestra-handel__atalanta_cd1-17-aria_riportai_glorioso_palma-88-117.mp3" +"43855" "2837" "21791" "1" "1" "0" "4/american_bach_soloists-j_s__bach__mass_in_b_minor_cd1-10-qui_sedes_ad_dextram_patris-59-88.mp3" "7/rocket_city_riot-pop_killer-01-mister_right-88-117.mp3" "4/paul_avgerinos-sky_of_grace-05-communion-0-29.mp3" +"4167" "23031" "5239" "3" "8" "6" "3/etherfysh-box_of_fysh-01-sanctuary-552-581.mp3" "4/seismic_anamoly-sweet_rock_candy-05-kick_in_tha_nuts-0-29.mp3" "d/rapoon-vernal_crossing-01-the_same_river_once-88-117.mp3" +"12292" "5987" "18512" "0" "3" "0" "0/american_bach_soloists-j_s__bach__transcriptions_of_italian_music-03-concerto_in_a_minor_for_four_harpsichords_bwv_1065_iii_allegro-146-175.mp3" "f/memories_of_tomorrow-waiting_for_dawn-02-after_thought-88-117.mp3" "2/shira_kammen-the_almanac-04-may_carol-88-117.mp3" +"9459" "43596" "37307" "2" "3" "2" "5/satori-for_relaxation-02-river_surround-349-378.mp3" "6/strojovna_07-mirnix-10-out_clipping-0-29.mp3" "1/etherine-24_days-08-singing_for_the_day-59-88.mp3" +"37083" "15072" "27985" "2" "0" "1" "0/paul_berget-the_siena_manuscript_on_steel_string_guitar-08-recercar_8_steel_string_guitar-59-88.mp3" "4/myles_cochran-marginal_street-03-so_gone-378-407.mp3" "1/william_brooks-karma_dogs-06-miracle-117-146.mp3" +"57624" "12131" "56718" "1" "0" "0" "b/la_primavera-english_renaissance_music-24-i_saw_my_lady_weepe_dowland-262-291.mp3" "1/suzanne_teng-mystic_journey-03-china_lily-233-262.mp3" "f/jacob_heringman_and_catherine_king-alonso_mudarra_songs_and_solos-21-pavana_4_course_guitar-30-59.mp3" +"40264" "27594" "26134" "0" "0" "1" "2/duo_chambure-vihuela_duets_of_valderrabano-09-obsecro_te_domina_josquin_dez_prez-233-262.mp3" "f/paul_berget-sl_weiss_on_11_strings-06-linfidele_suite_paysanne-117-146.mp3" "1/artemis-undone-06-beside_u-59-88.mp3" +"30136" "6" "54102" "2" "0" "0" "4/falling_you-touch-07-____a_cry_for_the_brokenhearted-117-146.mp3" "f/american_bach_soloists-j_s__bach_solo_cantatas-01-bwv54__i_aria-146-175.mp3" "d/katherine_roberts_perl-j_s__bach_french_suites-16-suite_no_3_in_b_minor_bwv_814_sarabande-204-233.mp3" +"28431" "30745" "2816" "4" "14" "14" "5/four_stones_net-ridin_the_faders-06-part_6-88-117.mp3" "3/spinecar-autophile-07-cant_sleep-0-29.mp3" "f/memories_of_tomorrow-waiting_for_dawn-01-memories_of_tomorrow-291-320.mp3" +"44689" "44918" "6034" "0" "0" "1" "7/rob_costlow-woods_of_chaos-10-twilight-88-117.mp3" "e/steven_devine-portrait_of_an_english_harpsichord-11-sonata_no_1_in_f_major_allegro_thomas_arne-0-29.mp3" "0/chris_harvey-the_white_sail-02-allegiance-146-175.mp3" +"56523" "36715" "23576" "6" "4" "6" "3/jag-four_strings-20-your_daddy_dont_know-59-88.mp3" "4/rapoon-the_kirghiz_light__cd_1-08-ora-233-262.mp3" "0/the_bots-truth-05-never_fall_in_love-146-175.mp3" +"52052" "56250" "10709" "1" "0" "1" "d/katherine_roberts_perl-j_s__bach_french_suites-14-suite_no_3_in_b_minor_bwv_814_allemande-88-117.mp3" "1/etherine-24_days-20-in_the_garden-30-59.mp3" "5/stargarden-the_art_of_analog_diversion-02-trybal-88-117.mp3" +"38270" "11073" "33781" "10" "5" "8" 
"0/the_bots-truth-08-where_has_our_love_gone-146-175.mp3" "1/zilla-egg-02-wicker_pilots-0-29.mp3" "0/william_brooks-bitter_circus-07-the_hanging_of_allen_scott_johnson-117-146.mp3" +"49393" "2979" "35226" "2" "0" "1" "f/various_artists-south_by_southwest_compilation-12-wreck_of_the_zephyr_drop_trio-117-146.mp3" "d/paul_avgerinos-balancing_spheres-01-night_illusions__the_longing-929-958.mp3" "4/american_bach_soloists-j_s__bach__mass_in_b_minor_cd1-08-domine_deus-88-117.mp3" +"53758" "19598" "6959" "1" "0" "2" "6/jeni_melia-the_lost_art_of_wooing-16-lady_careys_dump_anon-30-59.mp3" "9/suzanne_teng-enchanted_wind-04-septembers_angels-175-204.mp3" "e/solace-balance-02-dragon_and_sword-349-378.mp3" +"57776" "19397" "33513" "1" "0" "1" "c/the_art_of_the_lute_player-the_art_of_the_lute_player-24-tiento_del_segundo_tono__alonso_mudarra__jacob_heringman-0-29.mp3" "0/ammonite-reconnection-04-restrained_mind-146-175.mp3" "7/artemis-gravity-07-subterranean-204-233.mp3" +"21490" "24772" "51717" "5" "14" "6" "d/rapoon-vernal_crossing-05-bol_baya-204-233.mp3" "b/liquid_zen-elements_at_loop_10-05-spirit-59-88.mp3" "1/tim_rayborn-qadim-14-perishde__santur_persian-262-291.mp3" +"50052" "39653" "41805" "5" "6" "2" "6/tom_paul-i_was_king-13-i_wish_the_world_was_flat-59-88.mp3" "6/dj_markitos-slower_emotions138_bpm_remixes-09-inside_your_dreams_138_bpm_remix-146-175.mp3" "0/rocket_city_riot-last_of_the_pleasure_seekers-10-in_my_bones-30-59.mp3" +"30091" "8196" "15173" "1" "2" "0" "6/ralph_meulenbroeks-gambomania-07-la_reveuse_martin_marais-88-117.mp3" "b/solar_cycle-sunlight-02-like_it_2-88-117.mp3" "d/seth_and_maryse_carlin-schubert__music_for_fortepiano_four_hands-03-sonata_in_c_major_grand_duo__scherzo__allegro_vivace-204-233.mp3" +"11168" "607" "18213" "1" "0" "1" "e/dac_crowell-within_this_space-02-within_this_space-1451-1480.mp3" "c/jay_kishor-the_payans_concert-01-bhimpalasi-552-581.mp3" "c/seismic_anamoly-ramifications-04-julies_tune-320-349.mp3" +"16703" "342" "53972" "32" "4" "5" "1/dac_crowell-sferica-04-benthic-929-958.mp3" "8/jacob_heringman-josquin_des_prez_lute_settings-01-anon__praeter_rerum_seriem-117-146.mp3" "1/tanya_tomkins_and_eric_zivian-beethoven_piano_and_cello_works-16-sonata_in_d_major_op__102_no__2_adagio_con_molto_sentimento_daffetto-59-88.mp3" +"5805" "46161" "31451" "1" "0" "0" "3/c__layne-potemkin_villages-01-you_are_the_reason-146-175.mp3" "b/elizabeth_wolff-moments_musicaux-11-moritz_moszkowski__4_moments_musicaux_op__84__moderato_e_grazioso-30-59.mp3" "6/ed_martin-luis_milan__el_maestro-07-fantasia_7-0-29.mp3" +"34801" "36220" "23473" "1" "0" "1" "d/the_west_exit-nocturne-08-become_anyone-117-146.mp3" "2/drop_trio-big_dipper-08-leftys_alone-146-175.mp3" "c/jamie_janover-now_center_of_time-05-momentinnium-494-523.mp3" +"58763" "49355" "6273" "0" "5" "2" "8/jacob_heringman-blame_not_my_lute-38-the_paduane-0-29.mp3" "e/solace-balance-12-wither_my_lotus_flower-88-117.mp3" "5/john_fleagle-worlds_bliss__medieval_songs_of_love_and_death-02-blow_northerne_wynd-88-117.mp3" +"13951" "41537" "25877" "7" "0" "2" "e/jamie_janover_and_michael_masley-all_strings_considered-03-mnemonic_harmonics-320-349.mp3" "a/four_stones-la_vie_chill-09-u_meaning_u-204-233.mp3" "7/roots_of_rebellion-the_looking_glass-06-amnesia-175-204.mp3" +"20072" "15645" "46011" "16" "4" "23" "e/skitzo-heavy_shit-04-stab_her_goodnight-117-146.mp3" "4/norine_braun-modern_anguish-03-the_mystic-59-88.mp3" "0/ensemble_vermillian-stolen_jewels-11-kreiger_ricercar-146-175.mp3" +"39376" "21109" "9147" "4" "1" "4" 
"5/arthur_yoria-suerte_something-09-greek_archaic-88-117.mp3" "1/solace-the_gathering_season-05-aenaem-146-175.mp3" "a/bjorn_fogelberg-karooshi_porn-02-quite_derivative-88-117.mp3" +"42679" "47279" "5464" "2" "2" "10" "8/cargo_cult-vibrant-10-fifth-59-88.mp3" "f/glen_bledsoe-octopants-11-und_so_weiter_i-0-29.mp3" "a/bjorn_fogelberg-karooshi_porn-01-trioxidation-0-29.mp3" +"11904" "9312" "31108" "12" "4" "3" "d/belief_systems-eproms-03-boiling-175-204.mp3" "5/sitar-first_congregational_church_concert-02-raga_patdeep__gat__jay_kishor-1277-1306.mp3" "5/human_response-survival-07-distance-262-291.mp3" +"3260" "4502" "5983" "0" "2" "1" "8/justin_bianco-phoenix-01-phoenix-88-117.mp3" "2/ensemble_mirable-conversations_galantes-01-sonata_iii_in_d_minor_allegromoderato-88-117.mp3" "e/burning_babylon-stereo_mash_up-02-addis_red_dub-204-233.mp3" +"36220" "34801" "23473" "0" "2" "1" "2/drop_trio-big_dipper-08-leftys_alone-146-175.mp3" "d/the_west_exit-nocturne-08-become_anyone-117-146.mp3" "c/jamie_janover-now_center_of_time-05-momentinnium-494-523.mp3" +"50950" "26334" "1999" "0" "2" "0" "4/burnshee_thornside-the_art_of_not_blending_in-13-til_i_met_you-30-59.mp3" "a/somadrone-trancelucent-06-coda-146-175.mp3" "a/tilopa-pictures_of_silence-01-ichi-30-59.mp3" +"31752" "5636" "53032" "0" "2" "0" "e/tilopa-out_of_the_blue-07-heeyahoa-494-523.mp3" "b/seismic_anamoly-dead_mans_hand-01-walkin_the_line-146-175.mp3" "3/jan_hanford-24_preludes_for_solo_piano-15-prelude_no__15_in_a_minor-30-59.mp3" +"52656" "2443" "51700" "0" "0" "2" "c/strojovna_07-number_1-15-fajront-30-59.mp3" "5/thursday_group-uncle_mean-01-like_white_on_rice-117-146.mp3" "c/the_art_of_the_lute_player-the_art_of_the_lute_player-14-pavan_heres_paternus__antony_holborne__jacob_heringman-117-146.mp3" +"21993" "29873" "25776" "2" "7" "2" "d/paul_avgerinos-balancing_spheres-05-day_dreams__and_thirst_is_quenched-842-871.mp3" "e/touchinggrace-submission-06-watching_clouds-175-204.mp3" "f/magnatune-relaxation_spa-06-after_claire_fitch-146-175.mp3" +"17267" "33118" "31629" "15" "31" "7" "c/beatundercontrol-the_introduction-04-direction_dub-0-29.mp3" "c/seismic_anamoly-ramifications-07-serenade_for_samantha-0-29.mp3" "8/justin_bianco-phoenix-07-gift-30-59.mp3" +"36821" "30982" "28578" "4" "4" "0" "a/jade_leary-and_come_the_sirens-08-paul_gauguin-0-29.mp3" "f/norine_braun-and_the_mood_swings-07-cruel_streak-88-117.mp3" "6/mercy_machine-in_your_bed-06-quietly-88-117.mp3" +"49355" "58763" "6273" "3" "0" "3" "e/solace-balance-12-wither_my_lotus_flower-88-117.mp3" "8/jacob_heringman-blame_not_my_lute-38-the_paduane-0-29.mp3" "5/john_fleagle-worlds_bliss__medieval_songs_of_love_and_death-02-blow_northerne_wynd-88-117.mp3" +"35743" "45502" "9321" "2" "0" "1" "8/ivilion-terean-08-hthar-88-117.mp3" "a/asteria-soyes_loyal-11-dueil_angoisseux_gilles_binchois-233-262.mp3" "5/sitar-first_congregational_church_concert-02-raga_patdeep__gat__jay_kishor-1538-1567.mp3" +"27637" "5916" "24980" "0" "1" "0" "c/touchinggrace-happenstance-06-last_nights_dream_the_experience-175-204.mp3" "4/trip_wamsley-its_better_this_way-02-20_years_too_late-88-117.mp3" "c/touchinggrace-happenstance-05-sunday_driver-0-29.mp3" +"49155" "5659" "25574" "1" "0" "1" "c/william_brooks-blue_ribbon__the_best_of_william_brooks-12-the_hanging_of_allen_scott_johnson-175-204.mp3" "1/spinecar-up_from_the_mud-01-waste_away-0-29.mp3" "f/american_bach_soloists-j_s__bach_solo_cantatas-06-bwv82__iii_aria-320-349.mp3" +"25241" "41411" "11965" "3" "0" "1" 
"a/ensemble_sreteniye-ancient_church_singing_of_byzantine_georgia_and_rus-05-trisagion_holy_god_strochnoi_chant-59-88.mp3" "5/paul_avgerinos-muse_of_the_round_sky-09-the_night_sky-88-117.mp3" "8/kenji_williams-worldspirit_soundtrack-03-buddha_embryo-30-59.mp3" +"37428" "55029" "49015" "0" "0" "2" "6/doc_rossi-demarzi6_sonatas_for_cetra_o_kitara-08-sonata_ii_minuet-0-29.mp3" "b/paul_beier-alessandro_piccinini-18-aria_ii_di_sarauanda_in_parte_variate-59-88.mp3" "1/vito_paternoster-cd1bach_cello_suites-12-suite_ii_in_re_minore__gigue-88-117.mp3" +"49873" "14539" "5223" "0" "2" "1" "b/magnatune_compilation-rock-13-emmas_mini_disconnected-204-233.mp3" "d/shira_kammen-mistral-03-quand_jetais_jeune-59-88.mp3" "2/jesse_manno-sea_spirits-01-the_river-0-29.mp3" +"55029" "37428" "49015" "1" "0" "0" "b/paul_beier-alessandro_piccinini-18-aria_ii_di_sarauanda_in_parte_variate-59-88.mp3" "6/doc_rossi-demarzi6_sonatas_for_cetra_o_kitara-08-sonata_ii_minuet-0-29.mp3" "1/vito_paternoster-cd1bach_cello_suites-12-suite_ii_in_re_minore__gigue-88-117.mp3" +"56776" "17173" "25497" "4" "2" "12" "4/ensemble_mirable-triemer_six_cello_sonatas-21-sonata_5_largo-88-117.mp3" "4/paul_avgerinos-sky_of_grace-04-dance_of_life-175-204.mp3" "6/drop_trio-cezanne-05-wreck_of_the_zephyr-523-552.mp3" +"28099" "15956" "1385" "1" "1" "0" "5/paul_avgerinos-muse_of_the_round_sky-06-muse_of_the_round_sky-117-146.mp3" "b/janine_johnson-bach_goldberg_variations-03-variatio_2_a_1_clav-30-59.mp3" "d/magnatune_compilation-new_age_and_jazz-01-ehren_starks_the_tale_of_room_620-262-291.mp3" +"1743" "28712" "47770" "0" "0" "2" "6/stargarden-ambient_excursions-01-ghosts_of_ocean_gardens-146-175.mp3" "8/hybris-the_first_words-06-rotten_flowers-30-59.mp3" "6/jeni_melia-the_lost_art_of_wooing-12-come_away_come_sweet_love_john_dowland-30-59.mp3" +"9274" "46362" "36315" "0" "2" "1" "5/sitar-first_congregational_church_concert-02-raga_patdeep__gat__jay_kishor-175-204.mp3" "7/american_bach_soloists-j_s__bach__mass_in_b_minor_cd2-11-osanna_in_exelsis-0-29.mp3" "a/the_headroom_project-ciri_a_doro-08-male_voice_for_genang_drum-0-29.mp3" +"38715" "49519" "48593" "0" "0" "6" "6/sand_craig_and_dornenburg-biber_violin_sonatas-09-biber_rosary_sonata_14_in_d_major_the_assumption_of_mary-30-59.mp3" "1/tim_rayborn-qadim-13-amniat__robab_afghan-204-233.mp3" "9/strojovna_07-iii-12-partition-204-233.mp3" +"51176" "4161" "41762" "13" "15" "8" "c/strojovna_07-number_1-14-blaudisco-175-204.mp3" "3/etherfysh-box_of_fysh-01-sanctuary-378-407.mp3" "d/rapoon-vernal_crossing-09-yitun-291-320.mp3" +"43402" "54892" "43640" "7" "4" "30" "d/the_west_exit-nocturne-10-nine_lives-204-233.mp3" "d/drevo-christian_themes_in_ukrainian_folk_songs-17-through_the_wide_field_through_the_deep_sea-88-117.mp3" "c/liquid_zen-seventythree-10-passing_cars-0-29.mp3" +"6257" "49532" "6733" "0" "0" "2" "e/jamie_janover_and_michael_masley-all_strings_considered-02-birds_of_mindrise-146-175.mp3" "a/jade_leary-and_come_the_sirens-13-and_come_the_sirens-30-59.mp3" "f/professor_armchair-too_much_mustard-02-daddy_long_legs-59-88.mp3" +"35664" "40002" "42627" "1" "4" "10" "f/lizzi-love_and_you_and_i-08-gone-233-262.mp3" "8/mediva-viva_mediva-09-mandad_ei_comigo_cantiga_no_3-0-29.mp3" "1/pizzle-party_patrol-10-farrakorn-0-29.mp3" +"49580" "8580" "34190" "10" "4" "6" "b/dj_cary-power_synths-13-atom__instrumental_mix_trancevision-30-59.mp3" "4/dac_crowell-spctr-02-northeastern_corridor-610-639.mp3" "a/bjorn_fogelberg-karooshi_porn-07-wave-117-146.mp3" +"28622" "26264" "31774" "0" "0" "2" 
"6/dj_markitos-slower_emotions138_bpm_remixes-06-raving_illusion_138_bpm_remix-146-175.mp3" "2/indidginus-as_above_so_below-06-champa-30-59.mp3" "7/rapoon-what_do_you_suppose-07-how_many_of_you_did_not_know_that-523-552.mp3" +"19374" "43509" "5396" "5" "12" "14" "3/dj_cary-downtempo_chill-04-reentry_cargo_cult-117-146.mp3" "7/claire_fitch-ambiencellist_part_ii-10-oceanic_memories-0-29.mp3" "7/jag-cypress_grove_blues-01-train_train-59-88.mp3" +"12196" "45777" "30488" "1" "1" "1" "9/the_wretch-ambulatory-03-comfort-117-146.mp3" "5/domased-selection-11-he_is_leaving_us_blue_m_rmx-175-204.mp3" "0/apa_ya-apa_ya-07-apa_ya_pradha-0-29.mp3" +"58771" "14210" "40900" "0" "1" "0" "8/jacob_heringman-blame_not_my_lute-39-tiente_alora-0-29.mp3" "d/tim_rayborn-chordae-03-o_suavis-639-668.mp3" "0/jeffrey_luck_lucas-what_we_whisper-09-sometimes_sometimes-0-29.mp3" +"56164" "24844" "10323" "0" "1" "0" "9/janine_johnson-german_keyboard_masters-20-ciacona_in_e_buxtehude-88-117.mp3" "7/wicked_boy-the_treatment-05-strange_days-88-117.mp3" "8/tim_rayborn-veils_of_light-02-tabak-30-59.mp3" +"30170" "17103" "28433" "1" "0" "0" "c/rapoon-easterly_6_or_7-07-6_or_7-320-349.mp3" "f/american_baroque-the_four_seasons_by_vivaldi-04-concerto_no_2_in_g_minor_rv_315_summer__allegro_non_molto-175-204.mp3" "d/janine_johnson-telemann_harpsichord_solos_from_der_getreue_music_meister-06-partin_a_cembalo_solo_menuets_telemann-0-29.mp3" +"14328" "8644" "48706" "2" "18" "8" "f/glen_bledsoe-octopants-03-ppop-59-88.mp3" "3/american_baroque-mozart_4_quartets_for_strings_and_winds-02-oboe_quartet_in_f_k370__adagio-117-146.mp3" "c/soulprint-in_spite_of_it_all-12-putrid-146-175.mp3" +"58771" "40900" "14210" "1" "0" "1" "8/jacob_heringman-blame_not_my_lute-39-tiente_alora-0-29.mp3" "0/jeffrey_luck_lucas-what_we_whisper-09-sometimes_sometimes-0-29.mp3" "d/tim_rayborn-chordae-03-o_suavis-639-668.mp3" +"7035" "18230" "43358" "23" "6" "8" "9/indidginus-sine_language-02-dusty_lands-0-29.mp3" "0/jeffrey_luck_lucas-what_we_whisper-04-just_like_moths-146-175.mp3" "b/jacob_heringman-jane_pickeringes_lute_book-10-my_lord_willoughbies_welcom_home_by_mr_byrde-59-88.mp3" +"39954" "48850" "10250" "10" "14" "34" "f/heavy_mellow-acoustic_abstracts-09-long_ago_and_far_away-0-29.mp3" "4/orinda-airs_de_cour-12-si_je_languis_dun_martire_incogneu_joachim_thibaut_de_courville-0-29.mp3" "3/dj_cary-downtempo_chill-02-sunday_bed_curl-233-262.mp3" +"17012" "37696" "29056" "6" "21" "5" "1/ambient_teknology-phoenix-04-cloud_maker-465-494.mp3" "7/solace-rhythm_of_the_dance-08-sword_dance_routine-320-349.mp3" "6/falik-streaks_and_strokes-06-son_of_sand_flea-291-320.mp3" +"22528" "24295" "31104" "2" "0" "1" "6/philharmonia_baroque-rameau_and_leclair-05-gavottes__air_pour_les_pagodes__contredanse_en_roundeau_rameau_les_paladins-88-117.mp3" "9/the_kokoon-berlin-05-scout-117-146.mp3" "5/human_response-survival-07-distance-146-175.mp3" +"2308" "44657" "57629" "0" "1" "0" "0/american_bach_soloists-joseph_haydn__masses-01-kyrie__allegro_moderato-146-175.mp3" "f/ehren_starks-lines_build_walls-10-tunnel_systems-88-117.mp3" "c/magnatune-classical-24-la_primavera_robert_jones_sweet_kate-59-88.mp3" +"49228" "41133" "14233" "2" "1" "6" "8/daniel_berkman-calabashmoon-12-train_to_bamako-30-59.mp3" "b/ehren_starks-the_depths_of_a_year-09-subtle_groove-0-29.mp3" "3/american_baroque-mozart_4_quartets_for_strings_and_winds-03-oboe_quartet_in_f_k370__rondeau-59-88.mp3" +"40133" "15037" "11549" "0" "1" "0" "f/lizzi-love_and_you_and_i-09-my_destination-59-88.mp3" 
"1/spinecar-up_from_the_mud-03-smoke-0-29.mp3" "a/tilopa-by_the_way-03-amigos_de_viaje-204-233.mp3" +"34671" "54448" "47224" "2" "0" "0" "f/strojovna_07-switch_on__switch_off-08-anything_anywhere-175-204.mp3" "4/monks_and_choirs_of_kiev_pechersk_lavra-chants_of_the_russian_orthodox_church-17-great_doxology-436-465.mp3" "f/brad_sucks-i_dont_know_what_im_doing-11-time_to_take_out_the_trash-0-29.mp3" +"44532" "21321" "39220" "1" "0" "0" "6/falik-streaks_and_strokes-10-the_last_faery-0-29.mp3" "e/hans_christian-phantoms-05-atlantis-59-88.mp3" "8/beth_quist-lucidity-09-ferte_preza_na_prezarro-146-175.mp3" +"35818" "3525" "51041" "7" "8" "4" "9/william_brooks-silent_wings-08-i_will_stop_with_you-175-204.mp3" "d/kyiv_chamber_choir-praise_the_lord-01-rachmaninovpraise_the_lord_from_the_heavens-30-59.mp3" "9/ya_elah-each_of_us-13-yhi_ratzon-88-117.mp3" +"47104" "1129" "33781" "0" "1" "0" "6/tom_paul-i_was_king-11-the_best_in_me-59-88.mp3" "d/ambient_teknology-the_all_seeing_eye_project-01-cyclops-552-581.mp3" "0/william_brooks-bitter_circus-07-the_hanging_of_allen_scott_johnson-117-146.mp3" +"34326" "2451" "1862" "33" "11" "14" "c/jamie_janover-now_center_of_time-07-zeus___bruce_vamp-0-29.mp3" "5/thursday_group-uncle_mean-01-like_white_on_rice-349-378.mp3" "5/domased-selection-01-he_is_leaving_us-175-204.mp3" +"21336" "40740" "52054" "3" "9" "3" "9/janine_johnson-german_keyboard_masters-05-auf_das_heilige_pfingstfest_pachelbel-59-88.mp3" "f/ivilion-sartinal-09-sartinal_iii-175-204.mp3" "d/katherine_roberts_perl-j_s__bach_french_suites-14-suite_no_3_in_b_minor_bwv_814_allemande-146-175.mp3" +"9196" "31738" "25861" "4" "5" "16" "4/jay_kishor-amber-02-raga_malgunji__jor-842-871.mp3" "e/tilopa-out_of_the_blue-07-heeyahoa-88-117.mp3" "9/the_wretch-ambulatory-06-ambulatory-204-233.mp3" +"37165" "26546" "2021" "2" "0" "0" "0/voices_of_music-an_evening_with_bach-08-schlafe_mein_leibster_bwv_213-88-117.mp3" "6/strojovna_07-mirnix-06-d_a_r_k-233-262.mp3" "a/falik-dreams_from_the_machine-01-ika-59-88.mp3" +"40036" "9362" "19350" "0" "2" "0" "a/jade_leary-the_lost_art_of_human_kindness-09-meaner_than_winter-175-204.mp3" "1/the_rajdhani_quartet-the_gandhi_memorial_concert-02-ragas_sindhu_bhairavi_and_gurjari_todi-842-871.mp3" "3/etherfysh-box_of_fysh-04-rama-1045-1074.mp3" +"44180" "53857" "18594" "19" "4" "8" "1/william_brooks-karma_dogs-10-slacker_blues-204-233.mp3" "d/daniel_ben_pienaar-book_2_cd1_welltempered_clavier-16-prelude_and_fugue_no__8_in_dsharp_minor_bwv_877_fuga-88-117.mp3" "e/justin_bianco-forge-04-mischief-0-29.mp3" +"49185" "31128" "2750" "1" "0" "1" "3/emmas_mini-beat_generation_mad_trick-12-the_wait_is_over-30-59.mp3" "b/solar_cycle-sunlight-07-dream_of_you-146-175.mp3" "f/lizzi-love_and_you_and_i-01-me-233-262.mp3" +"27453" "35493" "44191" "12" "6" "3" "7/artemis-gravity-06-inception-146-175.mp3" "d/beth_quist-shall_we_dance-08-finale-320-349.mp3" "5/stargarden-the_art_of_analog_diversion-10-smooth-88-117.mp3" +"13363" "36624" "40228" "3" "3" "20" "f/kenji_williams-faces_of_epiphany-03-illusion_bedrock_mix-378-407.mp3" "1/four_stones-chronic_dreams-08-ocean_in_her_head_spliff_mix-146-175.mp3" "f/jacob_heringman_and_catherine_king-alonso_mudarra_songs_and_solos-09-o_gelosia_de_amanti-0-29.mp3" +"6185" "238" "13667" "6" "1" "4" "f/strojovna_07-dirnix-02-basetra_noje-59-88.mp3" "d/seismic_anamoly-afterburner-01-afterburner-117-146.mp3" "b/seismic_anamoly-dead_mans_hand-03-long_gone-0-29.mp3" +"56867" "27468" "55246" "0" "3" "0" 
"b/janine_johnson-bach_goldberg_variations-21-variatio_20_a_2_clav-88-117.mp3" "2/magnatune_compilation-electronica-06-indidginus_dusty_lands-262-291.mp3" "3/jacob_heringman-siena_lute_book-18-la_volunte_sandrin-117-146.mp3" +"29370" "17683" "47440" "5" "13" "5" "a/rhonda_lorence-winter_moon-06-steamy_river-59-88.mp3" "c/strojovna_07-number_1-04-flaki-262-291.mp3" "e/steven_devine-portrait_of_an_english_harpsichord-12-sonata_no_4_in_c_minor_allemande_joseph_kelway-88-117.mp3" +"32971" "2420" "22422" "1" "1" "2" "2/antiguru-fall_submissions-07-rectify-378-407.mp3" "c/liquid_zen-oscilloscope-01-levier_darmement-117-146.mp3" "d/shiva_in_exile-ethnic-05-floating-146-175.mp3" +"18681" "44782" "42925" "7" "4" "5" "5/trancevision-lemuria-04-nebula-30-59.mp3" "2/paul_avgerinos-gnosis-10-we_are_one-88-117.mp3" "5/burnshee_thornside-rock_this_moon-10-i_know_this_feeling-88-117.mp3" +"29561" "18581" "43292" "8" "10" "5" "d/utopia_banished-night_of_the_black_wyvern-06-the_darkness_we_burn-30-59.mp3" "2/anup-embrace-04-minor_song-0-29.mp3" "7/falik-elvolution-10-mediterranean_blue-262-291.mp3" +"52015" "57975" "37278" "1" "1" "3" "3/c__layne-potemkin_villages-14-stiltz-436-465.mp3" "d/janine_johnson-telemann_harpsichord_solos_from_der_getreue_music_meister-25-suite_courante_pezold-59-88.mp3" "d/processor-insomnia-08-shiraio_pig-117-146.mp3" +"13279" "48887" "33408" "0" "0" "2" "a/the_headroom_project-ciri_a_doro-03-hoyd_hoy_june-117-146.mp3" "a/falik-dreams_from_the_machine-12-son_of_sand_flea_remix-204-233.mp3" "4/the_marginal_prophets-bohemian_rap_cd-07-spank_you-88-117.mp3" +"35897" "50394" "8457" "0" "2" "0" "6/farallon_recorder_quartet-ludwig_senfl-08-in_domino_confido-204-233.mp3" "e/joram-moments_of_clarity-13-plenilune-262-291.mp3" "6/mercy_machine-in_your_bed-02-my_joan_of_arc-0-29.mp3" +"40913" "37827" "54289" "1" "0" "0" "4/ensemble_mirable-triemer_six_cello_sonatas-09-sonata_2_minuetto_i-88-117.mp3" "d/various_artists-the_art_of_persuasion-08-the_fate_cargo_cult-117-146.mp3" "a/jacob_heringman-black_cow-17-bakfark_fantasia-30-59.mp3" +"39434" "53909" "43074" "6" "17" "10" "a/plunkett-14_days-09-hold_tight-117-146.mp3" "f/philharmonia_baroque_orchestra-handel__atalanta_cd2-16-recitativo_oh_del_crudo_mio_bene-30-59.mp3" "4/justin_bianco-siren-10-journeys_twilight-30-59.mp3" +"18472" "2833" "14859" "3" "2" "16" "6/drop_trio-cezanne-04-luna-88-117.mp3" "8/cargo_cult-vibrant-01-mirrored_image-175-204.mp3" "b/oberlin_consort_of_viols-5_and_6_part_fantasies_of_william_lawes-03-set_a_5_in_f_major-233-262.mp3" +"12381" "27052" "47377" "1" "1" "0" "6/dj_markitos-slower_emotions138_bpm_remixes-03-cyber_evolution_138_bpm_remix-262-291.mp3" "c/rapoon-easterly_6_or_7-06-falling_more_slowly-349-378.mp3" "c/liquid_zen-seventythree-11-wobble_into_venus-349-378.mp3" +"49519" "38715" "48593" "2" "2" "3" "1/tim_rayborn-qadim-13-amniat__robab_afghan-204-233.mp3" "6/sand_craig_and_dornenburg-biber_violin_sonatas-09-biber_rosary_sonata_14_in_d_major_the_assumption_of_mary-30-59.mp3" "9/strojovna_07-iii-12-partition-204-233.mp3" +"15290" "7261" "1224" "0" "3" "1" "9/domased-slowdown-03-sound_of_ambient_part_1-175-204.mp3" "1/richard_savino-murcia__danza_y_diferencias-02-fandango-117-146.mp3" "4/human_response-delirious-01-delirious-291-320.mp3" +"31234" "18106" "40259" "1" "1" "0" "a/jade_leary-and_come_the_sirens-07-earthwish_on_saturn-30-59.mp3" "2/jamie_janover-evolutions-04-interlocken-117-146.mp3" "2/duo_chambure-vihuela_duets_of_valderrabano-09-obsecro_te_domina_josquin_dez_prez-88-117.mp3" +"35979" "52820" 
"12045" "2" "2" "0" "8/mercy_machine-in_your_bed__the_remixes-08-invisible_cosmic_sea_shanty_mix-349-378.mp3" "d/kyiv_chamber_choir-praise_the_lord-15-lvovof_thy_mystical_supper-59-88.mp3" "e/dac_crowell-within_this_space-03-cantillation-1016-1045.mp3" +"34876" "18135" "10696" "1" "1" "0" "4/monks_and_choirs_of_kiev_pechersk_lavra-chants_of_the_russian_orthodox_church-08-bless_is_the_man_authentic_kiev_chant-88-117.mp3" "6/mercy_machine-in_your_bed__instrumental_mix-04-invisible_instrumental_mix-117-146.mp3" "c/telemann_trio_berlin-telemann_trio_berlin-02-triosonate_fur_flute_violine_und_basso__allegro_telemann-0-29.mp3" +"2833" "18472" "14859" "2" "4" "17" "8/cargo_cult-vibrant-01-mirrored_image-175-204.mp3" "6/drop_trio-cezanne-04-luna-88-117.mp3" "b/oberlin_consort_of_viols-5_and_6_part_fantasies_of_william_lawes-03-set_a_5_in_f_major-233-262.mp3" +"25699" "15184" "2619" "0" "0" "1" "b/philharmonia_baroque-beethoven_symphonies_no_3_eroica_and_no_8-06-8_symph_3rd-117-146.mp3" "4/seth_carlin-schubert__works_for_solo_fortepiano-03-sonata_in_eflat_major_opus_posth__122__menuetto_allegretto-30-59.mp3" "4/jay_kishor-the_color_of_night-01-malkauns-1045-1074.mp3" +"43565" "27871" "52571" "0" "1" "0" "4/tim_rayborn-the_path_beyond-10-osman_pasha-291-320.mp3" "4/dj_markitos-evolution_of_the_mind-06-losing_control-175-204.mp3" "7/jag-cypress_grove_blues-15-cypress_boogie-0-29.mp3" +"56338" "6094" "10625" "13" "0" "1" "0/daniel_ben_pienaar-book_2_cd2_welltempered_clavier-20-prelude_and_fugue_no__22_in_bflat_minor_bwv_891_fuga-88-117.mp3" "8/skitzo-hellavator_musick-02-angels_blood-117-146.mp3" "f/jackalopes-jacksploitation-02-those_great_big____memories-88-117.mp3" +"48587" "11772" "46912" "0" "0" "1" "9/strojovna_07-iii-12-partition-30-59.mp3" "8/william_brooks-buffalo_treason-03-barn_near_a_big_house-117-146.mp3" "1/solace-the_gathering_season-11-sudan-233-262.mp3" +"25211" "48228" "37173" "3" "1" "12" "2/mrdc-timecode-05-timecode-146-175.mp3" "1/solace-the_gathering_season-12-journeys_end-146-175.mp3" "0/voices_of_music-an_evening_with_bach-08-schlafe_mein_leibster_bwv_213-320-349.mp3" +"9459" "37307" "43596" "3" "3" "1" "5/satori-for_relaxation-02-river_surround-349-378.mp3" "1/etherine-24_days-08-singing_for_the_day-59-88.mp3" "6/strojovna_07-mirnix-10-out_clipping-0-29.mp3" +"20177" "39386" "10538" "0" "2" "7" "d/katherine_roberts_perl-j_s__bach_french_suites-04-suite_no_4_in_e_flat_major_bwv_815_gavotte-30-59.mp3" "7/jeni_melia-the_last_of_old_england-09-green_bushes_trad-88-117.mp3" "4/rapoon-the_kirghiz_light__cd_1-02-the_temple_shakes-59-88.mp3" +"35979" "12045" "52820" "3" "0" "3" "8/mercy_machine-in_your_bed__the_remixes-08-invisible_cosmic_sea_shanty_mix-349-378.mp3" "e/dac_crowell-within_this_space-03-cantillation-1016-1045.mp3" "d/kyiv_chamber_choir-praise_the_lord-15-lvovof_thy_mystical_supper-59-88.mp3" +"11633" "23780" "3914" "0" "0" "2" "0/apa_ya-apa_ya-03-antare_e_dhu-175-204.mp3" "e/yongen-moonrise-05-one-204-233.mp3" "5/sitar-first_congregational_church_concert-01-raga_patdeep__alap__jay_kishor-1509-1538.mp3" +"39386" "20177" "10538" "1" "2" "6" "7/jeni_melia-the_last_of_old_england-09-green_bushes_trad-88-117.mp3" "d/katherine_roberts_perl-j_s__bach_french_suites-04-suite_no_4_in_e_flat_major_bwv_815_gavotte-30-59.mp3" "4/rapoon-the_kirghiz_light__cd_1-02-the_temple_shakes-59-88.mp3" +"40002" "35664" "42627" "3" "3" "7" "8/mediva-viva_mediva-09-mandad_ei_comigo_cantiga_no_3-0-29.mp3" "f/lizzi-love_and_you_and_i-08-gone-233-262.mp3" 
"1/pizzle-party_patrol-10-farrakorn-0-29.mp3" +"45860" "29527" "16281" "7" "29" "12" "7/rapoon-what_do_you_suppose-11-i_dont_expect_anyone-146-175.mp3" "c/glen_bledsoe-up_and_down-06-tango_cacafuego-146-175.mp3" "6/ralph_meulenbroeks-gambomania-04-sonata_in_d_minor_adagio_karl_friedrich_abel-59-88.mp3" +"56164" "10323" "24844" "0" "0" "2" "9/janine_johnson-german_keyboard_masters-20-ciacona_in_e_buxtehude-88-117.mp3" "8/tim_rayborn-veils_of_light-02-tabak-30-59.mp3" "7/wicked_boy-the_treatment-05-strange_days-88-117.mp3" +"20576" "55772" "26478" "7" "4" "0" "1/spinecar-passive_aggressive-04-true-262-291.mp3" "3/jag-four_strings-19-helena_street_corner_blues-30-59.mp3" "f/strojovna_07-switch_on__switch_off-06-crystal_durgamata_remix-175-204.mp3" +"23808" "4975" "9194" "3" "3" "13" "b/cargo_cult-alchemy-05-our_song-0-29.mp3" "b/solar_cycle-sunlight-01-sunlight-88-117.mp3" "4/jay_kishor-amber-02-raga_malgunji__jor-784-813.mp3" +"12739" "6033" "5192" "3" "13" "26" "c/hands_upon_black_earth-hands_upon_black_earth-03-effigy-146-175.mp3" "0/chris_harvey-the_white_sail-02-allegiance-117-146.mp3" "0/william_brooks-bitter_circus-01-the_gift-88-117.mp3" +"28288" "21983" "51177" "5" "5" "15" "9/lisa_debenedictis-tigers-06-ocean_in_her_head-117-146.mp3" "d/paul_avgerinos-balancing_spheres-05-day_dreams__and_thirst_is_quenched-552-581.mp3" "c/strojovna_07-number_1-14-blaudisco-204-233.mp3" +"21950" "21534" "364" "1" "1" "1" "b/ehren_starks-the_depths_of_a_year-05-dads_song-233-262.mp3" "4/swar_prabhat-designer_bliss-05-brahmodbhav-436-465.mp3" "c/jamie_janover-now_center_of_time-01-arc-204-233.mp3" +"3260" "5983" "4502" "0" "2" "0" "8/justin_bianco-phoenix-01-phoenix-88-117.mp3" "e/burning_babylon-stereo_mash_up-02-addis_red_dub-204-233.mp3" "2/ensemble_mirable-conversations_galantes-01-sonata_iii_in_d_minor_allegromoderato-88-117.mp3" +"27333" "6638" "56469" "10" "5" "4" "a/kitka-nectar-06-hopp_ide_tisztan_hungary-146-175.mp3" "b/philharmonia_baroque-mozart_orchestral_works-02-concerto_for_flute_and_orchestra_in_g_major_kv313_allegro-320-349.mp3" "c/o_fickle_fortune-a_celebration_of_robert_burns-20-the_sun_he_is_sunk_in_the_west-30-59.mp3" +"31198" "50159" "38582" "7" "10" "3" "5/kammen___swan-wild_wood-07-dunmore_lasses-175-204.mp3" "6/norine_braun-now_and_zen-13-lucky_13-30-59.mp3" "a/liquid_zen-magic_midsummer-09-arabia_underwater-262-291.mp3" +"28222" "51566" "56370" "7" "29" "6" "e/atomic_opera-penguin_dust-06-november-117-146.mp3" "8/philharmonia_baroque-cd2_scarlatti__cecilian_vespers-14-magnificat__gloria_patri-30-59.mp3" "e/magnatune_compilation-high_energy_rock_and_roll-20-skitzo_angels_blood-0-29.mp3" +"41492" "17881" "45650" "1" "1" "0" "6/jeni_melia-the_lost_art_of_wooing-09-toccata_no_6_in_f_major_hieronymus_kapsberger-88-117.mp3" "0/beth_quist-silver-04-grace-204-233.mp3" "e/solace-balance-11-feitian-175-204.mp3" +"30982" "36821" "28578" "5" "3" "0" "f/norine_braun-and_the_mood_swings-07-cruel_streak-88-117.mp3" "a/jade_leary-and_come_the_sirens-08-paul_gauguin-0-29.mp3" "6/mercy_machine-in_your_bed-06-quietly-88-117.mp3" +"29370" "47440" "17683" "1" "3" "11" "a/rhonda_lorence-winter_moon-06-steamy_river-59-88.mp3" "e/steven_devine-portrait_of_an_english_harpsichord-12-sonata_no_4_in_c_minor_allemande_joseph_kelway-88-117.mp3" "c/strojovna_07-number_1-04-flaki-262-291.mp3" +"28952" "17471" "14627" "1" "2" "0" "3/etherfysh-box_of_fysh-06-sidewalk-320-349.mp3" "b/belief_systems-eponyms-04-electro_kinetic-30-59.mp3" "f/dac_crowell-redshifted_harmonies-03-redshifted_harmonies-59-88.mp3" 
+"23793" "32232" "50605" "4" "8" "9" "3/dj_cary-eastern_grooves-05-oriental_distortionshiva_in_exile-59-88.mp3" "2/indidginus-as_above_so_below-07-machu-88-117.mp3" "2/ensemble_mirable-conversations_galantes-13-sonata_ii_in_b_minor_ariagratioso_altro-88-117.mp3" +"21897" "26678" "19853" "0" "1" "0" "6/curandero-curandero-05-corriendo_juntos-117-146.mp3" "e/hans_christian-phantoms-06-desperado-204-233.mp3" "1/phebe_craig_and_katherine_westine-beside_themselves-04-soler_conceierto_ii_in_a_minor_andante-175-204.mp3" +"35019" "3381" "21471" "2" "2" "1" "5/rapoon-cidar-08-cimbala-494-523.mp3" "6/barry_phillips-trad-01-polska_fran_glava-59-88.mp3" "6/falik-streaks_and_strokes-05-bliss-175-204.mp3" +"39836" "18423" "44559" "0" "1" "0" "2/shira_kammen-the_almanac-09-le_lai_de_la_rose-0-29.mp3" "9/domased-slowdown-04-long_journey-30-59.mp3" "7/jeni_melia-the_last_of_old_england-10-the_low_low_lands_of_holland_trad-117-146.mp3" +"9596" "19771" "3019" "1" "1" "0" "4/swar_prabhat-designer_bliss-02-saraswati_vandana-262-291.mp3" "9/curl-inner-04-sincerely_sorry-88-117.mp3" "1/touchinggrace-the_reformation_sessions-01-november_gale-59-88.mp3" +"2968" "27443" "33712" "1" "2" "0" "d/paul_avgerinos-balancing_spheres-01-night_illusions__the_longing-610-639.mp3" "5/mr_epic-sideways-06-in-59-88.mp3" "9/cheryl_ann_fulton-the_airs_of_wales-07-the_dimpled_cheek-0-29.mp3" +"4333" "23901" "44579" "1" "0" "2" "2/magnatune_remixed-ridin_the_faders_2-01-shining_star_games_az_egan_csillag_lenni__fourstones-30-59.mp3" "5/thursday_group-uncle_mean-05-pelican_fan-349-378.mp3" "e/john_jackson-bad_things_happen_all_the_time-10-through_the_glass-30-59.mp3" +"50207" "7785" "38252" "1" "3" "2" "b/cargo_cult-alchemy-13-matt-204-233.mp3" "2/aba_structure-tektonik_illusion-02-illusion-233-262.mp3" "0/williamson-a_few_things_to_hear_before_we_all_blow_up-08-whats_on_the_ceiling_beats_whats_on_tv-291-320.mp3" +"49371" "8182" "58152" "2" "2" "2" "e/briddes_roune-lenten_is_come-12-worldes_blis-30-59.mp3" "7/monoide-zeitpunkt-02-letzter_vorschlag-0-29.mp3" "d/janine_johnson-telemann_harpsichord_solos_from_der_getreue_music_meister-26-suite_sarabande_pezold-88-117.mp3" +"37795" "13068" "11263" "18" "17" "43" "8/hybris-the_first_words-08-the_choice_i_never_had-117-146.mp3" "b/dj_cary-power_synths-03-from_love_2_wicked_allstars-204-233.mp3" "0/american_bach_soloists-j_s__bach__cantatas_volume_v-03-gleichwie_der_regen_und_schnee_vom_himmel_fallt_bwv_18_iii_recitative_and_litany__mein_gott_hier_wird_mein_herze_sein-59-88.mp3" +"36904" "19353" "41821" "0" "0" "1" "a/janine_johnson-chopin_recital-08-polonaise_op_44_in_f_minor-552-581.mp3" "3/etherfysh-box_of_fysh-04-rama-1132-1161.mp3" "f/the_headroom_project-jetuton_andawai-10-late_night_blues-146-175.mp3" +"46456" "27316" "37064" "6" "2" "20" "0/beth_quist-silver-11-planet-291-320.mp3" "a/mr_gelatine-electroluv-06-hiphop110-30-59.mp3" "1/tim_rayborn-ashek-08-ravan-639-668.mp3" +"15187" "52981" "32559" "0" "0" "4" "4/seth_carlin-schubert__works_for_solo_fortepiano-03-sonata_in_eflat_major_opus_posth__122__menuetto_allegretto-117-146.mp3" "9/american_baroque-dances_and_suites_of_rameau_and_couperin-15-passepieds_12_suite_from_les_fetes_dhebe_rameau-117-146.mp3" "6/dj_markitos-slower_emotions138_bpm_remixes-07-ocean_of_the_emotions_138_bpm_remix-175-204.mp3" +"36917" "37631" "3770" "1" "1" "0" "6/curandero-curandero-08-prayer-291-320.mp3" "d/katherine_roberts_perl-j_s__bach_french_suites-08-suite_no_2_in_c_minor_bwv_813_allemande-204-233.mp3" 
"c/jay_kishor-cd2_the_sowebo_concert-01-raga_malkauns-3655-3684.mp3" +"22072" "6281" "14875" "6" "19" "15" "2/stellamara-star_of_the_sea-05-del_mar_rojo-175-204.mp3" "5/mr_epic-sideways-02-blue_days-117-146.mp3" "0/william_brooks-bitter_circus-03-seven_promises-117-146.mp3" +"47939" "52433" "4296" "28" "64" "52" "5/kammen___swan-wild_wood-12-eu_chorei-0-29.mp3" "8/magnatune-red_hat_summit_compilation-15-beat_under_control__blue_lights-320-349.mp3" "b/oberlin_consort_of_viols-5_and_6_part_fantasies_of_william_lawes-01-set_a_6_in_c_major-407-436.mp3" +"17661" "16441" "50974" "2" "0" "0" "a/liquid_zen-magic_midsummer-04-fire_below-30-59.mp3" "b/jacob_heringman-jane_pickeringes_lute_book-04-almaine_by_francis_cuttinge-30-59.mp3" "9/kiev_seminary_choir-hymns_of_the_allnight_vigil-13-troparion_to_the_st__fathers_of_pecherskaya_lavra-0-29.mp3" +"42899" "915" "41391" "24" "22" "12" "c/william_brooks-blue_ribbon__the_best_of_william_brooks-10-hideaway-175-204.mp3" "6/gerard_satamian-dry_fig_trees-01-chansons_sans_paroles_1989_op__2_pastorale-30-59.mp3" "6/mercy_machine-the_devil_i_know-09-the_man_i_create-146-175.mp3" +"41537" "13951" "25877" "0" "8" "2" "a/four_stones-la_vie_chill-09-u_meaning_u-204-233.mp3" "e/jamie_janover_and_michael_masley-all_strings_considered-03-mnemonic_harmonics-320-349.mp3" "7/roots_of_rebellion-the_looking_glass-06-amnesia-175-204.mp3" +"45731" "23987" "19521" "6" "8" "1" "2/drop_trio-big_dipper-11-gin_and_nothin-146-175.mp3" "2/zephyrus-angelus-05-quem_vidistis_parores_cipriano_de_rore-117-146.mp3" "7/aba_structure-epic-04-scrambling_to_stay_ahead-262-291.mp3" +"38057" "50779" "21334" "4" "4" "32" "c/telemann_trio_berlin-telemann_trio_berlin-08-triosonate_in_ddur__allegro_un_poco_c_p__bach-175-204.mp3" "2/vito_paternoster-cd2bach_cello_suites-13-suite_v_in_do_minore__prelude-88-117.mp3" "9/janine_johnson-german_keyboard_masters-05-auf_das_heilige_pfingstfest_pachelbel-0-29.mp3" +"10757" "29305" "6848" "3" "0" "22" "7/dac_crowell-the_sea_and_the_sky-02-umi_no_kami_ni_kansha-958-987.mp3" "f/rapoon-tin_of_drum-06-southbound-929-958.mp3" "5/new_york_consort_of_viols-dances_and_canzonas_of_holborne_and_brade-02-des_rothschencken_tanz_brade-59-88.mp3" +"43593" "37823" "8865" "23" "7" "3" "a/plunkett-14_days-10-out_at_sea-146-175.mp3" "d/various_artists-the_art_of_persuasion-08-the_fate_cargo_cult-0-29.mp3" "8/stargarden-music_for_modern_listening-02-perdido-262-291.mp3" +"49590" "21116" "2443" "7" "3" "5" "c/edward_martin-art_of_the_lute_in_renaissance_france-13-aupres_de_vous_sermisy-30-59.mp3" "1/solace-the_gathering_season-05-aenaem-349-378.mp3" "5/thursday_group-uncle_mean-01-like_white_on_rice-117-146.mp3" +"11785" "55629" "10349" "2" "0" "0" "f/rapoon-tin_of_drum-03-beneath_the_sky-175-204.mp3" "9/janine_johnson-german_keyboard_masters-19-canzona_in_e_buxtehude-59-88.mp3" "1/touchinggrace-the_reformation_sessions-02-taking_flight-407-436.mp3" +"50949" "3574" "36474" "0" "0" "2" "4/burnshee_thornside-the_art_of_not_blending_in-13-til_i_met_you-0-29.mp3" "1/the_rajdhani_quartet-the_gandhi_memorial_concert-01-raga_hansa_kalyani-1277-1306.mp3" "c/strojovna_07-number_1-08-na_bednicke_stal-117-146.mp3" +"23034" "56164" "2003" "1" "0" "0" "4/seismic_anamoly-sweet_rock_candy-05-kick_in_tha_nuts-88-117.mp3" "9/janine_johnson-german_keyboard_masters-20-ciacona_in_e_buxtehude-88-117.mp3" "a/tilopa-pictures_of_silence-01-ichi-146-175.mp3" +"37286" "39679" "26497" "3" "5" "2" "2/claire_fitch-ambiencellist-08-shortest_day-59-88.mp3" 
"0/william_brooks-bitter_circus-09-is_there_anybody_there-59-88.mp3" "b/lisa_debenedictis-mixter_one-06-cuckoo_passiveaggressive_mix_by_hisboyelroy-146-175.mp3" +"32971" "22422" "2420" "2" "0" "1" "2/antiguru-fall_submissions-07-rectify-378-407.mp3" "d/shiva_in_exile-ethnic-05-floating-146-175.mp3" "c/liquid_zen-oscilloscope-01-levier_darmement-117-146.mp3" +"13741" "54178" "13747" "1" "3" "1" "f/magnatune-relaxation_spa-03-march_thirtyone_falling_you-0-29.mp3" "3/jag-pretty_girl_blues-16-the_waterfall-117-146.mp3" "f/magnatune-relaxation_spa-03-march_thirtyone_falling_you-175-204.mp3" +"15930" "46279" "5421" "0" "2" "1" "8/ivilion-terean-03-urtulitreator-204-233.mp3" "a/ensemble_sreteniye-ancient_church_singing_of_byzantine_georgia_and_rus-11-o_theotokos_the_virgin_rejoice_ave_maria-494-523.mp3" "f/dac_crowell-redshifted_harmonies-01-tranquilitatis-581-610.mp3" +"28144" "43820" "50678" "5" "13" "5" "d/paul_avgerinos-maya__the_great_katun-06-night_of_the_goddess_part_2-378-407.mp3" "6/grayson_wray-picassos_dream-10-pure_delight-117-146.mp3" "c/vito_paternoster-cd1bach_sonatas_and_partitas_for_solo_violin-13-sonata_seconda_in_re_minore__grave-204-233.mp3" +"38270" "33781" "11073" "7" "12" "8" "0/the_bots-truth-08-where_has_our_love_gone-146-175.mp3" "0/william_brooks-bitter_circus-07-the_hanging_of_allen_scott_johnson-117-146.mp3" "1/zilla-egg-02-wicker_pilots-0-29.mp3" +"49995" "18968" "8884" "8" "3" "2" "9/lisa_debenedictis-tigers-13-girl_and_supergirl-59-88.mp3" "6/paul_beier-michelagnolo_galilei-04-passemezzo_and_saltarello-233-262.mp3" "9/musica_franca-corrette__le_phenix__les_delices_de_la_solitude-02-phenix__adagio-30-59.mp3" +"18423" "39836" "44559" "2" "0" "0" "9/domased-slowdown-04-long_journey-30-59.mp3" "2/shira_kammen-the_almanac-09-le_lai_de_la_rose-0-29.mp3" "7/jeni_melia-the_last_of_old_england-10-the_low_low_lands_of_holland_trad-117-146.mp3" +"10810" "32332" "34856" "3" "2" "4" "7/dac_crowell-the_sea_and_the_sky-02-umi_no_kami_ni_kansha-2495-2524.mp3" "f/tilopa-turkishauch-07-meifrui_ii-320-349.mp3" "b/magnatune_compilation-rock-08-beth_quist_monsters-59-88.mp3" +"19542" "23321" "16244" "1" "0" "1" "f/magnatune-relaxation_spa-04-secret_solution_michael_masley-175-204.mp3" "7/claire_fitch-ambiencellist_part_ii-05-longest_day-0-29.mp3" "0/american_bach_soloists-j_s__bach__cantatas_volume_v-04-gleichwie_der_regen_und_schnee_vom_himmel_fallt_bwv_18_iv_aria__mein_seelenschatz_ist_gottes_wort-146-175.mp3" +"45166" "10653" "12158" "1" "0" "1" "a/janine_johnson-chopin_recital-11-ballade_op_23_in_g_minor-262-291.mp3" "4/justin_bianco-siren-02-token-0-29.mp3" "2/solace-ahsas-03-circle_58_68_78-146-175.mp3" +"4071" "2486" "56693" "19" "3" "21" "3/dj_cary-downtempo_chill-01-ruff_and_tumble_mr_epic-233-262.mp3" "9/etherine-gleam-01-lost-262-291.mp3" "9/american_baroque-dances_and_suites_of_rameau_and_couperin-21-les_fauvetes_plaintives_xiveme_ordre_couperin-146-175.mp3" +"17220" "36931" "56532" "3" "1" "2" "d/paul_avgerinos-balancing_spheres-04-day_dreams__the_flames_are_awakened-88-117.mp3" "e/mountain_mirrors-mountain_mirrors-08-praying_mantis-117-146.mp3" "e/steven_devine-portrait_of_an_english_harpsichord-21-suite_no_5_in_e_major_allemande_george_frederick_handel-146-175.mp3" +"44782" "18681" "42925" "4" "7" "5" "2/paul_avgerinos-gnosis-10-we_are_one-88-117.mp3" "5/trancevision-lemuria-04-nebula-30-59.mp3" "5/burnshee_thornside-rock_this_moon-10-i_know_this_feeling-88-117.mp3" +"25211" "37173" "48228" "1" "11" "5" "2/mrdc-timecode-05-timecode-146-175.mp3" 
"0/voices_of_music-an_evening_with_bach-08-schlafe_mein_leibster_bwv_213-320-349.mp3" "1/solace-the_gathering_season-12-journeys_end-146-175.mp3" +"27211" "11300" "1987" "3" "4" "0" "3/sherefe-opium-06-geldim_bir_kara_tasha-175-204.mp3" "c/lvx_nova-lvx_nova-03-kyoto_nights-639-668.mp3" "e/c_layne-the_sun_will_come_out_to_blind_you-01-i_dont_care_if_you_lie-146-175.mp3" +"23415" "14931" "8748" "2" "0" "0" "2/roots_of_rebellion-surfacing-05-messenger-30-59.mp3" "4/jay_kishor-the_color_of_night-03-shivranjani-146-175.mp3" "b/satori-journey_to_other_worlds-02-other_worlds-871-900.mp3" +"50537" "40851" "37980" "0" "0" "1" "f/norine_braun-and_the_mood_swings-13-running_on_the_edge-204-233.mp3" "6/hoxman-synthesis_of_five-09-snake_eye-0-29.mp3" "f/satori-healing_sounds_of_tibet-08-tibetan_trance-59-88.mp3" +"24772" "21490" "51717" "11" "3" "7" "b/liquid_zen-elements_at_loop_10-05-spirit-59-88.mp3" "d/rapoon-vernal_crossing-05-bol_baya-204-233.mp3" "1/tim_rayborn-qadim-14-perishde__santur_persian-262-291.mp3" +"2878" "50177" "13057" "1" "2" "6" "f/satori-healing_sounds_of_tibet-01-moon_night-175-204.mp3" "8/philharmonia_baroque-cd2_scarlatti__cecilian_vespers-13-magnificat__fecit_potentiam-59-88.mp3" "b/the_bots-now_is_the_time-03-freak-117-146.mp3" +"19482" "20327" "46159" "0" "11" "2" "4/paul_berget-j_s__bach_on_the_lute-04-sarabande__bwv_996-0-29.mp3" "6/dr_kuch-analog_disease-04-the_big_bang_theory-59-88.mp3" "a/kitka-nectar-11-miskolc_felol_hidegen_fuj_a_szel_hungary-175-204.mp3" +"48887" "13279" "33408" "0" "1" "0" "a/falik-dreams_from_the_machine-12-son_of_sand_flea_remix-204-233.mp3" "a/the_headroom_project-ciri_a_doro-03-hoyd_hoy_june-117-146.mp3" "4/the_marginal_prophets-bohemian_rap_cd-07-spank_you-88-117.mp3" +"23321" "19542" "16244" "0" "0" "1" "7/claire_fitch-ambiencellist_part_ii-05-longest_day-0-29.mp3" "f/magnatune-relaxation_spa-04-secret_solution_michael_masley-175-204.mp3" "0/american_bach_soloists-j_s__bach__cantatas_volume_v-04-gleichwie_der_regen_und_schnee_vom_himmel_fallt_bwv_18_iv_aria__mein_seelenschatz_ist_gottes_wort-146-175.mp3" +"47390" "17122" "1628" "0" "0" "1" "6/falik-streaks_and_strokes-11-xanthanon-146-175.mp3" "d/ambient_teknology-the_all_seeing_eye_project-04-confusion_says-175-204.mp3" "8/magnatune-red_hat_summit_compilation-01-fluid__headphones-88-117.mp3" +"2515" "1548" "39508" "1" "0" "0" "2/mrdc-timecode-01-lust-146-175.mp3" "6/ed_martin-luis_milan__el_maestro-01-fantasia_1-30-59.mp3" "7/rocket_city_riot-pop_killer-09-hypodermic-30-59.mp3" +"7699" "23357" "26561" "2" "0" "2" "9/the_kokoon-berlin-02-how_do_i_work_this-30-59.mp3" "9/lisa_debenedictis-tigers-05-lowell-117-146.mp3" "4/rapoon-the_kirghiz_light__cd_1-06-dala-30-59.mp3" +"1743" "47770" "28712" "0" "1" "2" "6/stargarden-ambient_excursions-01-ghosts_of_ocean_gardens-146-175.mp3" "6/jeni_melia-the_lost_art_of_wooing-12-come_away_come_sweet_love_john_dowland-30-59.mp3" "8/hybris-the_first_words-06-rotten_flowers-30-59.mp3" +"10715" "21421" "16804" "0" "2" "0" "5/stargarden-the_art_of_analog_diversion-02-trybal-262-291.mp3" "c/five_star_fall-automatic_ordinary-05-between_2_floors-204-233.mp3" "8/mercy_machine-mercy_machine-04-bones-146-175.mp3" +"47678" "35224" "33174" "0" "0" "1" "7/american_bach_soloists-j_s__bach__mass_in_b_minor_cd2-12-benedictus-117-146.mp3" "4/american_bach_soloists-j_s__bach__mass_in_b_minor_cd1-08-domine_deus-30-59.mp3" "c/william_brooks-fowl_mouth-07-silent_wings-204-233.mp3" +"78" "35976" "7807" "7" "2" "1" 
"9/american_bach_soloists-heinrich_schutz__musicalische_exequien-01-musicalische_exequien_swv_279_teil_i_concert_in_form_einer_teutschen_begrabnismissa-1132-1161.mp3" "8/mercy_machine-in_your_bed__the_remixes-08-invisible_cosmic_sea_shanty_mix-262-291.mp3" "0/solace-iman-02-iman-146-175.mp3" +"19598" "53758" "6959" "0" "0" "5" "9/suzanne_teng-enchanted_wind-04-septembers_angels-175-204.mp3" "6/jeni_melia-the_lost_art_of_wooing-16-lady_careys_dump_anon-30-59.mp3" "e/solace-balance-02-dragon_and_sword-349-378.mp3" +"41233" "27950" "31068" "9" "1" "13" "9/the_sarasa_ensemble-a_baroque_mosaic-09-sweeter_than_roses_purcell-59-88.mp3" "9/william_brooks-silent_wings-06-maybe_meagan-146-175.mp3" "4/human_response-delirious-07-denial-233-262.mp3" +"46621" "47738" "30446" "19" "16" "6" "e/jade_leary-fossildawn-11-retroscope-175-204.mp3" "b/magnatune_compilation-rock-12-cargo_cult_our_song-0-29.mp3" "0/ammonite-reconnection-07-angel_hold_on___-291-320.mp3" +"6638" "27333" "56469" "13" "4" "2" "b/philharmonia_baroque-mozart_orchestral_works-02-concerto_for_flute_and_orchestra_in_g_major_kv313_allegro-320-349.mp3" "a/kitka-nectar-06-hopp_ide_tisztan_hungary-146-175.mp3" "c/o_fickle_fortune-a_celebration_of_robert_burns-20-the_sun_he_is_sunk_in_the_west-30-59.mp3" +"48043" "34380" "36352" "1" "2" "0" "a/da_camera-a_celtic_celebration-12-gavotta_summer__the_poppy_from_airs_for_the_seasons_osward-0-29.mp3" "c/lvx_nova-lvx_nova-08-in_memory_of_magick_mick-407-436.mp3" "e/solace-balance-08-miss_anime-30-59.mp3" +"50307" "5783" "40683" "26" "4" "5" "e/magnatune_com-magnatune_at_the_cc_salon-13-one_at_a_time_burnshee_thornside-117-146.mp3" "a/rhonda_lorence-winter_moon-01-winter_moon-233-262.mp3" "d/beth_quist-shall_we_dance-09-ritual-291-320.mp3" +"35019" "21471" "3381" "4" "1" "5" "5/rapoon-cidar-08-cimbala-494-523.mp3" "6/falik-streaks_and_strokes-05-bliss-175-204.mp3" "6/barry_phillips-trad-01-polska_fran_glava-59-88.mp3" +"54126" "10128" "52490" "0" "2" "0" "1/vito_paternoster-cd1bach_cello_suites-16-suite_vi_in_re_magiore__sarabande-117-146.mp3" "3/spinecar-autophile-02-stay-30-59.mp3" "b/richard_savino-mertz__bardic_sounds-15-childrens_fairy_tale-175-204.mp3" +"17267" "31629" "33118" "13" "9" "33" "c/beatundercontrol-the_introduction-04-direction_dub-0-29.mp3" "8/justin_bianco-phoenix-07-gift-30-59.mp3" "c/seismic_anamoly-ramifications-07-serenade_for_samantha-0-29.mp3" +"34045" "15160" "8537" "2" "0" "0" "c/glen_bledsoe-up_and_down-07-up_and_down-117-146.mp3" "5/edward_martin_and_paul_berget-baroque_lute_duets-03-sonata_in_c_major_weiss__largo-117-146.mp3" "a/tilopa-pictures_of_silence-02-ni-117-146.mp3" +"33662" "54392" "39327" "2" "0" "1" "d/seismic_anamoly-afterburner-07-ten_million_tears-436-465.mp3" "2/maryse_carlin-rameau__pieces_de_clavecin_en_concerts__forqueray_suites_4_and_5-17-fifth_suite___la_boisson_forqueray-117-146.mp3" "b/richard_savino-mertz__bardic_sounds-09-gondoliers_song-88-117.mp3" +"2515" "39508" "1548" "0" "0" "3" "2/mrdc-timecode-01-lust-146-175.mp3" "7/rocket_city_riot-pop_killer-09-hypodermic-30-59.mp3" "6/ed_martin-luis_milan__el_maestro-01-fantasia_1-30-59.mp3" +"56250" "52052" "10709" "1" "1" "0" "1/etherine-24_days-20-in_the_garden-30-59.mp3" "d/katherine_roberts_perl-j_s__bach_french_suites-14-suite_no_3_in_b_minor_bwv_814_allemande-88-117.mp3" "5/stargarden-the_art_of_analog_diversion-02-trybal-88-117.mp3" +"46491" "28319" "7744" "1" "1" "1" "7/artemis-gravity-11-prayer-233-262.mp3" 
"2/ensemble_sreteniye___three_holies_church_choristers-dont_cry_rachael-06-open_to_me_the_doors_of_repentance_byzantine_chant_bulgarian_tradition-30-59.mp3" "6/mercy_machine-the_devil_i_know-02-i_need_a_new_jesus-146-175.mp3" +"19697" "44230" "41409" "2" "9" "11" "a/tilopa-pictures_of_silence-04-shi-204-233.mp3" "9/musica_franca-corrette__le_phenix__les_delices_de_la_solitude-10-sonata_iii_in_c_major__allemanda-59-88.mp3" "5/paul_avgerinos-muse_of_the_round_sky-09-the_night_sky-30-59.mp3" +"12843" "33304" "29707" "1" "0" "4" "b/richard_savino-mertz__bardic_sounds-03-evening_song-146-175.mp3" "4/seth_carlin-schubert__works_for_solo_fortepiano-07-sonata_in_a_major_opus_posth__120__allegro-320-349.mp3" "f/memories_of_tomorrow-waiting_for_dawn-06-trance_of_life-0-29.mp3" +"18557" "40792" "50294" "1" "11" "1" "1/touchinggrace-the_reformation_sessions-04-melon_tropic_sunrise-378-407.mp3" "4/john_williams-long_ride_home-09-she_walks-30-59.mp3" "2/shira_kammen-the_almanac-13-o_western_wind-59-88.mp3" +"26234" "6954" "10728" "0" "1" "0" "1/tim_rayborn-qadim-06-calliopeia__kithara_ancient_greek-146-175.mp3" "e/solace-balance-02-dragon_and_sword-204-233.mp3" "7/dac_crowell-the_sea_and_the_sky-02-umi_no_kami_ni_kansha-117-146.mp3" +"55783" "47739" "36335" "26" "8" "3" "9/american_baroque-dances_and_suites_of_rameau_and_couperin-19-le_rossignol_en_amour_xiveme_ordre_couperin-0-29.mp3" "b/magnatune_compilation-rock-12-cargo_cult_our_song-30-59.mp3" "7/roots_of_rebellion-the_looking_glass-08-messenger-175-204.mp3" +"21950" "37093" "43565" "1" "0" "0" "b/ehren_starks-the_depths_of_a_year-05-dads_song-233-262.mp3" "4/seismic_anamoly-sweet_rock_candy-08-refleections-146-175.mp3" "4/tim_rayborn-the_path_beyond-10-osman_pasha-291-320.mp3" +"16180" "24201" "24006" "6" "2" "2" "e/sun_palace-into_heaven-03-your_hands_lie_open-204-233.mp3" "2/anup-embrace-05-romance_with_nature-262-291.mp3" "4/jay_kishor-amber-05-raga_malgunji__tabla_solo_in_jhaptaal-146-175.mp3" +"31068" "27950" "41233" "5" "2" "13" "4/human_response-delirious-07-denial-233-262.mp3" "9/william_brooks-silent_wings-06-maybe_meagan-146-175.mp3" "9/the_sarasa_ensemble-a_baroque_mosaic-09-sweeter_than_roses_purcell-59-88.mp3" +"14233" "41133" "49228" "3" "2" "5" "3/american_baroque-mozart_4_quartets_for_strings_and_winds-03-oboe_quartet_in_f_k370__rondeau-59-88.mp3" "b/ehren_starks-the_depths_of_a_year-09-subtle_groove-0-29.mp3" "8/daniel_berkman-calabashmoon-12-train_to_bamako-30-59.mp3" +"23106" "41415" "44869" "12" "0" "3" "4/tilopa-kyotaku_live-05-kyo_rei-349-378.mp3" "5/paul_avgerinos-muse_of_the_round_sky-09-the_night_sky-204-233.mp3" "c/liquid_zen-oscilloscope-10-zeitgeist-146-175.mp3" +"6020" "24470" "8305" "11" "1" "1" "d/ambient_teknology-the_all_seeing_eye_project-02-all_seeing_eye-146-175.mp3" "e/touchinggrace-submission-05-sitar_study-204-233.mp3" "f/tilopa-turkishauch-02-meifrui_i-204-233.mp3" +"9196" "25861" "31738" "3" "18" "9" "4/jay_kishor-amber-02-raga_malgunji__jor-842-871.mp3" "9/the_wretch-ambulatory-06-ambulatory-204-233.mp3" "e/tilopa-out_of_the_blue-07-heeyahoa-88-117.mp3" +"14328" "48706" "8644" "5" "4" "19" "f/glen_bledsoe-octopants-03-ppop-59-88.mp3" "c/soulprint-in_spite_of_it_all-12-putrid-146-175.mp3" "3/american_baroque-mozart_4_quartets_for_strings_and_winds-02-oboe_quartet_in_f_k370__adagio-117-146.mp3" +"5221" "8949" "20497" "1" "1" "0" "7/roots_of_rebellion-the_looking_glass-01-the_order-175-204.mp3" "c/jamie_janover-now_center_of_time-02-playa-639-668.mp3" "e/atomic_opera-penguin_dust-04-thirst-175-204.mp3" 
+"50779" "36049" "19859" "1" "0" "8" "2/vito_paternoster-cd2bach_cello_suites-13-suite_v_in_do_minore__prelude-88-117.mp3" "1/phebe_craig_and_katherine_westine-beside_themselves-08-js_bach_concerto_in_c_major_for_2_harpsichords_allegro_moderato-436-465.mp3" "8/cargo_cult-vibrant-04-son_of_ambriel-30-59.mp3" +"9321" "45502" "35743" "0" "0" "3" "5/sitar-first_congregational_church_concert-02-raga_patdeep__gat__jay_kishor-1538-1567.mp3" "a/asteria-soyes_loyal-11-dueil_angoisseux_gilles_binchois-233-262.mp3" "8/ivilion-terean-08-hthar-88-117.mp3" +"6717" "30974" "21752" "0" "2" "0" "e/hans_christian-phantoms-02-coyotes_dance-117-146.mp3" "1/phebe_craig_and_katherine_westine-beside_themselves-07-couperin_musetes_de_choisi_et_de_taverni-146-175.mp3" "5/stargarden-step_off-05-china_green-494-523.mp3" +"6992" "5453" "25493" "3" "11" "2" "5/domased-selection-02-drunk_warrior-146-175.mp3" "f/dac_crowell-redshifted_harmonies-01-tranquilitatis-1509-1538.mp3" "6/drop_trio-cezanne-05-wreck_of_the_zephyr-407-436.mp3" +"50394" "35897" "8457" "1" "0" "0" "e/joram-moments_of_clarity-13-plenilune-262-291.mp3" "6/farallon_recorder_quartet-ludwig_senfl-08-in_domino_confido-204-233.mp3" "6/mercy_machine-in_your_bed-02-my_joan_of_arc-0-29.mp3" +"12088" "8446" "34711" "3" "1" "1" "1/dac_crowell-sferica-03-chapel_hill_phantom_lantern-59-88.mp3" "8/mercy_machine-mercy_machine-02-my_fathers_hand-262-291.mp3" "c/liquid_zen-oscilloscope-08-autumn_glide-59-88.mp3" +"55772" "20576" "26478" "5" "4" "0" "3/jag-four_strings-19-helena_street_corner_blues-30-59.mp3" "1/spinecar-passive_aggressive-04-true-262-291.mp3" "f/strojovna_07-switch_on__switch_off-06-crystal_durgamata_remix-175-204.mp3" +"27594" "40264" "26134" "0" "1" "1" "f/paul_berget-sl_weiss_on_11_strings-06-linfidele_suite_paysanne-117-146.mp3" "2/duo_chambure-vihuela_duets_of_valderrabano-09-obsecro_te_domina_josquin_dez_prez-233-262.mp3" "1/artemis-undone-06-beside_u-59-88.mp3" +"48898" "6776" "8678" "42" "14" "23" "e/seth_carlin-mozart_in_the_age_of_enlightenment-12-sonata_15_in_c_minor__allegro_georg_benda-117-146.mp3" "d/paul_avgerinos-maya__the_great_katun-02-dawn_of_the_gods_part_2-262-291.mp3" "0/beth_quist-silver-02-om_asatoma_sad_gamaya-117-146.mp3" diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/AnnotDB.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/AnnotDB.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,385 @@ +% The thesaurus class is a basic component of all +% genre and tag information managing the whole Vocabulary. + +classdef AnnotDB < handle + + % public properties + properties (SetAccess = private) + lexicon = {}; + + end + + properties(Hidden, Access = private) + + annotsdb; % a numowners x numannots sparse binary / prob matrix + annots_oid; % ownerid to pos in annots conversion + + binary = 0; % indicator whether the db contains binary or scored annots + end + + methods + + % --- + % simple constructor + % --- + function db = AnnotDB(lexicon, annots, ids) + % db = AnnotDB(lexicon, annots, annotation_ids) + % lexicon: the list of all individual annotation elements or + % clips_by_annot or + % lexids_by_clip + + % annots: either a clips x numel(lexicon) binary matrix or + % ... + % + % annotation_ids: clip ids for the binary case, + + if nargin >= 1 + + % --- + % NOTE: two ways of supplying the annots are allowed: + % 1. clip ids for each lexical element + % 2. 
binary matrix + % --- + if ischar(lexicon) + + if strcmp(lexicon, 'clips_by_annot') + + % --- + % preset the lexicon and hash ids + % --- + db.lexicon = unique(lower(annots)); + + if iscell(ids) + db.annots_oid = unique([ids{:}]); + else + db.annots_oid = unique(ids); + end + + db.annotsdb = sparse(numel(db.annots_oid),... + numel( db.lexicon)); + +% for all annotations + for i = 1:numel(annots) + +% for all ids in set + % is this a cell or just a single index< + if iscell(ids) + for j = 1:numel(ids{i}) + + db.add_pair(ids{i}(j), annots{i}); + end + else + % single ndex case + db.add_pair(ids(i), annots{i}); + end + end + + elseif strcmp(lexicon, 'annots_by_clip') + + end + % this is the binary case + else + + db.lexicon = lexicon; + if nargin >= 2 + + db.annotsdb = sparse(annots); + db.annots_oid = ids; + else + db.annotsdb = sparse(0, numel(db.lexicon)); + end + end + end + end + + % --- + % retrieve annot-substructure for given clip ids, + % collecting std = [or = all] ,[and = common] + % annots for these + % --- + function new_db = subset(db, ownerids, mode) + % new_db = subset(db, ownerids, {'and', ['or']}) + + if nargin < 3 + mode = 'or'; + end + + % --- + % create new DB + % we make sure the tag id index keeps + % the same for subsets by copying the whole + % lexicon + % --- + new_db = AnnotDB(db.lexicon); + + switch lower(mode) + case 'and' + + % --- + % TODO: implement this and + % improve speed below + % --- + case 'or' + + % successively fill with given annots + for i = 1:numel(ownerids) + + % --- + % we retrieve annots for each clip + % and add them to the new database + % --- + [annot, score] = annots(db, ownerids(i)); + for j = 1:numel(annot) + + new_db.add_pair(ownerids(i), annot{j}, score(j)); + end + end + otherwise + error 'illegal owner id combination mode. possibly forgot brackets'; + end + end + + % retrieve annot-substructure for complement + % of given clip ids + function [new_db] = exclude(db, ownerids) + + % get complement of clip ids + ownerids = setdiff(db.annots_oid, ownerids); + + new_db = subset(db, ownerids); + end + + % --- + % retrieve clip by annot. + % if multiple annots are given, the clips + % containing all of them (logical and) are + % returned + % --- + function oids = owner(db, annotstr, mode) + + if nargin < 3 + mode = 'and'; + end + + if ~iscell(annotstr) + annotstr = {annotstr}; + end + + annotid = []; + for i = 1:numel(annotstr) + + annotid = [annotid strcellfind(db.lexicon, annotstr{i})]; + end + + oids = owner_for_annotid(db, annotid, mode); + end + + % retrieve owner ids by clip + function ownerids = owner_for_annotid(db, annotid, mode) + % ownerids = ownerids_for_annotid(db, annotid, {['and'], 'or'}) + + if nargin < 3 + mode = 'and'; + end + + switch lower(mode) + case 'or' + % search for all appearing owners + candidates = sum(db.annotsdb(:, annotid), 2) > 0; + + case 'and' + % search for the common owners + candidates = sum(db.annotsdb(:, annotid), 2) == ... 
+ numel(annotid); + otherwise + error 'illegal tag combination mode'; + end + + + % get positions in database + pos = find(candidates); + + % return owner ids + ownerids = db.annots_oid(pos); + end + + % retrieve annotid by clip + function [aid, score] = annotids_for_owner(db, ownerid, mode) + + % single query case + if numel(ownerid) == 1 + + pos = owner_pos(db, ownerid); + + % get positions in database + aid = find(db.annotsdb(pos, :) > 0); + + score = db.annotsdb(pos, aid); + + + % sort ids for output + if ~db.binary + + [score, idx] = sort(score, 'descend'); + aid = aid(idx); + end + else + if nargin < 3 + mode = 'or'; + end + % --- + % the query contained multiple ids + % + % we dont return the single results but + % the statistics for this subset of clips + % --- + + new_db = db.subset(ownerid, mode); + [null, score, aid] = new_db.stats; + end + end + + % retrieve annotation by clip + function [out, score, aid] = annots(db, ownerid) + + [aid, score] = db.annotids_for_owner( ownerid); + + out = db.get_annot_name(aid); + end + + + % retrieve annot name given a annot id + function out = get_annot_name(db, annotid) + + out = {}; + for i = 1:numel(annotid) + + out{i} = db.lexicon{annotid(i)}; + end + end + + % return annotation id for annotation string + function aid = get_annot_id(db, annotstr) + + if ~iscell(annotstr) + + % expensive search within annot list + aid = strcellfind(db.lexicon, annotstr); + else + + % search seperately for each annot + for i = 1:numel(annotstr) + aid(i) = strcellfind(db.lexicon, annotstr{i}); + end + end + end + + % --- + % return statistics on saved annotations. + % = returns the sum of the scores and + % sortec lexicon + % --- + function [labels, score, annotids] = stats(db) + + % out = zeros(1, size(db.annotsdb,2)); + score = full(sum(db.annotsdb, 1)); + [score, annotids] = sort(score,'descend'); + + % prepare labels + labels = db.lexicon(annotids); + end + + % this is a stub for a tag cloud-like output + function [out] = annots_cloud(db, ownerid) + + % --- + % TODO: actually output tag-cloud + % this output is aimed at input into a web interface + % we successfully used http://www.wordle.net/ + % --- + + if nargin > 1 + db2 = db.subset(ownerid); + else + db2 = db; + end + + [labels, score, annotids] = stats(db2); + + % --- + % Note: for performance issues we compress this data + % to a maximum value of 1001 + % --- + score = ceil((score./max(score))*100); + + out = ''; + for i = 1:numel(annotids) + + % repeat the tag according to score + annot = strrep(labels{i},' ','-'); + for j = 1:score(i) + out = sprintf('%s; %s',annot, out); + end + end + end + + + function out = size(db) + + out = numel(db.lexicon); + end + end + + + methods(Hidden) + + function add_pair(db, ownerid, annot, score) +% add_pair(db, owner, annot) + + if nargin < 4 + score = 1; + end + + aid = strcellfind(db.lexicon, annot); + + % create new position for annotation if neccesary + if isempty(aid) + + aid = numel(db.lexicon) + 1; + + % add to lexicon + db.lexicon = {db.lexicon{:}, annot}; + + % enhance annotation matrix + db.annotsdb = [db.annotsdb, ... + zeros(size(db.annotsdb,1), 1)]; + end + + + % create new position for clip if neccesary + pos = owner_pos(db, ownerid); + if isempty(pos) + + pos = numel(db.annots_oid) +1; + + % add to oid + db.annots_oid = [db.annots_oid, ownerid]; + + % enhance annotation matrix + db.annotsdb = [db.annotsdb; ... 
+ zeros(1, size(db.annotsdb, 2))]; + end + + % save data to database + db.annotsdb(pos, aid) = score; + end + + + function pos = owner_pos(db, ownerid) + + % returns database position for owner id + pos = find(db.annots_oid == ownerid); + end + end + +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/ClipComparedGraph.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/ClipComparedGraph.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,57 @@ +% --- +% class ClipComparedGraph +% Directed graph representing comparative clip similarities. +% +% each node represents a clip +% an edge a -> b is present if clipas a and b cooccur +% in a comparison triplet +% +% --- + +classdef ClipComparedGraph < Graph + +% --- +% the methods +% --- +methods + + +% constructor of the graph +function G = ClipComparedGraph(comparison, comparison_ids) + + if nargin == 2 + % --- + % handle automatic or manual input + % --- + for i = 1:size(comparison,1) + + % get clips and votes + clips = comparison_ids(comparison(i,1:3)); + + % edges + G.add_edge(clips(1), clips(2), 1); + G.add_edge(clips(1), clips(3), 1); + G.add_edge(clips(2), clips(3), 1); +% +% % symmectric edges +% G.add_edge(clips(2), clips(1), 1); +% G.add_edge(clips(3), clips(1), 1); +% G.add_edge(clips(3), clips(2), 1); + end + + elseif nargin == 1 + + % --- + % Add the input graph to the empty one + % --- + G.add_graph(comparison); + + end + +% end constructor function +end + +end +end + + diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/ClipPairGraph.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/ClipPairGraph.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,455 @@ +% --- +% mother class for the directed clip graph classes +% contains general purpose and graph-theorethical +% functions +% --- +% we consider the similarity between clips +% as symmetric, thus sim(a,b) = sim(b,a) +% this makes the set of nodes much smaller +% --- + +% --- +% TODO: change structure to +% avoid using Vclips, mapping clip ids +% into a node number 00id1..00id2, +% +% Enables full integration into Digraph / Graph class +% --- + +classdef ClipPairGraph < DiGraph + + properties(Hidden) + + max_clipid = 10^6; + vid_map; + vid_last = 0; + end + +methods + +% --- +% Constructor +% --- +function G = ClipPairGraph(comparison) + + if nargin == 0 + + G.vid_map = containers.Map('KeyType', 'double', 'ValueType', 'double'); + + elseif nargin == 1 + + G = ClipPairGraph(); + + % --- + % Add the input graph to the empty one + % --- + G.add_graph(comparison); + + clas = cat(1, class(comparison), superclasses(comparison)); + if strcellfind(clas, 'ClipPairGraph') + + G.vid_map = comparison.vid_map; + end + end +end + +% --- +% Node id by clip +% --- +function Vid = node(G, a, b) + + % --- + % hash function for clip pair + % --- + + % sort clip ids + sm = min([a b]); + bg = max([a b]); + + % --- + % get node code: + % max_clipid * bg + sm + % --- + hash = G.max_clipid *bg + sm; + + % --- + % table lookup + % --- + if G.vid_map.isKey(hash) + + Vid = G.vid_map(hash); + else + + % --- + % create new psition in hash table and + % assign this ID to the node + % --- + G.vid_last = G.vid_last + 1; + Vid = G.vid_last; + + G.vid_map(hash) = Vid; + + % --- + % NOTE: this is intended for maybe swithcingto + % cell descriptions in future. 
+ % The node ids will be stored as well + % --- + G.Vlbl{Vid} = G.label(Vid); + end +end + +% --- +% Clip ids for Node +% --- +function [sm, bg] = clipsN(G, Vid) + + % --- + % TODO: There must be a more efficient way to do this + % --- + % get all hash data + + all_hashs = G.vid_map.values(); + all_keys = G.vid_map.keys(); + % --- + % search for this Node ID and return hash value + % --- + hash = all_keys{cell2mat(all_hashs) == Vid}; + + sm = mod(hash, G.max_clipid); + bg = div(hash, G.max_clipid); +end + + +% --- +% Edge weight by clip id +% --- +function [weight, V1, V2] = edge(G, a, b, c) + + if nargin == 4 + V1 = add_node(G, a, b); + V2 = add_node(G, a, c); + + elseif nargin == 3 + V1 = a; + V2 = b; + end + + weight = edge@DiGraph(G, V1, V2); +end + +function [weight, varargout] = edges(G,a,b) + + % all edges or from specified node ? + if nargin >= 2 + + % is there a clip pair or node number input? + if nargin == 3 + V1 = add_node(G, a, b); + + elseif nargin == 2 + V1 = a; + end + + [weight, V1, V2] = edges@DiGraph(G, V1); + + else + % --- + % ok, get ALL edges + % --- + [weight, V1, V2] = edges@DiGraph(G); + end + + % how to represent the output + if nargout <= 3 + varargout{1} = V1; + varargout{2} = V2; + else + % --- + % get all the clips from the edges + % --- + ao = zeros(1,numel(V1)); + bo = zeros(1,numel(V1)); + co = zeros(1,numel(V1)); + for i =1:numel(V1) + [ao(i), bo(i), co(i)] = G.clipsE(V1(i), V2(i)); + end + varargout{1} = ao; + varargout{2} = bo; + varargout{3} = co; + end +end + +% --- +% add an edge saying sim(a,b) > sim(a,c) +% --- +function add_edge(G, a, b, c, weight) + + if nargin == 5 + V1 = add_node(G, a, b); + V2 = add_node(G, a, c); + + elseif nargin == 4 + V1 = a; + V2 = b; + weight = c; + end + + % --- + % call superclass + % --- + add_edge@DiGraph(G, V1, V2, weight); + +end + +function Vid = add_node(G, a, b) + + if nargin == 3 + Vid = G.node(a,b); + else + Vid = a; + end + + % --- + % call superclass + % --- + add_node@DiGraph(G, Vid); +end + +% --- +% the pairgraph variant of add_graph also updates the +% clip id hashmap +% ---- +function add_graph(G, G2) + + % determine if symmetric edges have to be added + clas = cat(1, class(G2), superclasses(G2)); + if strcellfind(clas, 'Graph') + add_symm = 1; + else + add_symm = 0; + end + + % --- + % add all nodes and edges in G2 + % --- + for V = G2.nodes(); + + [a, count] = sscanf(G2.label(V),'%u:%u'); + + if count == 2 + b = a(2); + a = a(1); + + G.add_node(a,b); + else + G.add_node(V); + end + end + + % --- + % NOTE / TODO: + % this LABEL inheritance is far too expensive when + % creating many copiesof the same graph + % Its also unnessecary for lots of them + % except for debugging :(. + % --- + G.Vlbl = cell(1, numel(G2.V)); + if G2.cardinality > 1 + G.Vlbl(G.nodes()) = G2.label(G2.nodes()); + else + G.Vlbl(G.nodes()) = {G2.label(G2.nodes())}; + end + + % --- + % get all edges in G2 + % CAVE: if we added the edges above using + % label clip indices, we have to address them + % using these below! + % --- + [val, V1, V2] = edges(G2); + + for i = 1:numel(val) + + % --- + % add edges to graph + % NOTE: we assume either all or no edges + % are labeled with clip indices + % --- + if count == 2 + % --- + % ok, they were labeled above, + % so index by clips. + % + % TODO: + % 1. get rid of this sscanf stuff and use add_edge for + % creating the nodes first, the only add single nodes + % 2. 
use cell labels globally instead of hashmap here + + [u, count] = sscanf(G2.label(V1(i)),'%u:%u'); + [v, count] = sscanf(G2.label(V2(i)),'%u:%u'); + + a = intersect(u,v); + b = setdiff(u,a); + c = setdiff(v,a); + + G.add_edge(a, b, c, val(i)); + + if add_symm + % --- + % add symmetric edges to graph + % --- + G.add_edge(a, c, b, val(i)); + end + + else + G.add_edge(V1(i), V2(i), val(i)); + + if add_symm + % --- + % add symmetric edges to graph + % --- + G.add_edge(V2(i), V1(i), val(i)); + end + + end + + + end + +end + +function remove_node(G, a, b) + + if nargin == 3 + Vid = G.node(a,b); + else + Vid = a; + end + + % --- + % call superclass + % --- + remove_node@DiGraph(G, Vid); +end + +% --- +% Clip ids for Edge +% --- +function [a,b,c] = clipsE(G, V1, V2) + + [c1, c2] = clipsN(G, V1); + [c3, c4] = clipsN(G, V2); + + % common clip + a = intersect([c1 c2],[c3 c4]); + + % nearer (similar) clip + b = setdiff([c1 c2],a); + + % far clip + c = setdiff([c3 c4],a); + + if isempty(a) + error 'clip similarity graph inconsistent' + end +end + +% --- +function out = label(G, Vid) + + if nargin == 1 + out = cell(numel(G.V), 1); + Vid = 1:numel(G.V); + end + + for i = 1:numel(Vid) + if (numel(G.Vlbl) < Vid(i)) || isempty(G.Vlbl{Vid(i)}) + + [sm, bg] = G.clipsN(Vid(i)); + + if numel(Vid) > 1 + out{i} = sprintf('%d:%d', sm, bg); + else + out = sprintf('%d:%d', sm, bg); + end + else + if numel(Vid) > 1 + out{i} = G.Vlbl{Vid(i)}; + else + out = G.Vlbl{Vid(i)}; + end + end + end +end + +% --- +% determines if Edges in G2 are the same as in G +% --- +function out = le(a,b) + out = isSubgraph(a, b); +end + +function [out] = isSubgraph(G, G2) + + [val, V1, V2] = G2.edges(); + + i = 1; + while i <= numel(V1) + % --- + % Test if edge exists in other graph, + % using clips as identifier + % --- + [a,b,c] = G2.clipsE(V1(i), V2(i)); + + if G.edge(a,b,c) ~= val(i) + out = 0; + return, + end + i = i + 1; + end + out = 1; +end + + +function [Gout] = minus(a, b) + Gout = feval(class(a),a); + Gout.remove_graph(b); +end + +function remove_graph(G, G2) + + % --- + % Get Edges in G2 and remove them + % --- + [~, V1, V2] = G2.edges(); + for i = 1:numel(V1) + % --- + % Test if edge exists in other graph, + % using clips as identifier + % --- + [a,b,c] = G2.clipsE(V1(i), V2(i)); + + G.remove_edge(a,b,c); + end + + % --- + % Note : we only remove nodes with no + % remaining incoming edges + % --- + V = G2.nodes(); + for i = 1:numel(V) + + % --- + % get corresponding node in G via clips + % --- + [a,b] = G2.clipsN(V(i)); + + Vid = node(G, a, b); + if G.degree(Vid) == 0 + + G.remove_node(Vid); + end + end +end +end +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/ClipSimGraph.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/ClipSimGraph.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,191 @@ +% --- +% class ClipSimGraph +% Directed graph representing comparative clip similarities. 
+% +% each node represents a pair of clips, +% an edge (a,b) -> (a,c) is present if there is at least one +% users judging clip a more similar to clip b than to clip c +% +% The Edges thus represent the (are nearer to each othen than) +% expression +% --- + +classdef ClipSimGraph < ClipPairGraph + +% --- +% the methods +% --- +methods + + +% constructor of the graph +function G = ClipSimGraph(comparison, comparison_ids) + + % --- + % handle manual input + % --- + + if nargin == 1 + % --- + % this uses an imput grpaph + % to create a new MD graph + % --- + + G = ClipSimGraph(); + + % --- + % Add the input graph to the empty one + % --- + G.add_graph(comparison); + + % --- + % TODO: Copy Clip Pair Information if neccessary + % this is by now a handle copy + % --- + clas = cat(1, class(comparison), superclasses(comparison)); + if strcellfind(clas, 'ClipPairGraph') + + G.vid_map = comparison.vid_map; + end + + elseif nargin == 2 + + % --- + % handle automatic or manual input + % --- + for i = 1:size(comparison,1) + + % get clips and votes + clips = comparison_ids(comparison(i,1:3)); + votes = comparison(i,4:6); + + % for each triplet position create an edge reflecting the + % absolute and relative voting for this position + + % --- + % NOTE: we index 0 - 2 to ease the mod + % calculaton for the remaining indices + % --- + for v = 0:2 + + % --- + % has at least one user voted this clip out? + % If so, treat it as an outlier and determine score + % --- + if votes(v+1) > 0 + + a = mod(v+1, 3)+1; + b = mod(v+2, 3)+1; + c = v+1; + + % --- + % TODO: how to determine the weight? + % There are two options: relative in the triple + % and counting absolute hits. + % first we go for the relative ones. + % The other is implemented in ClipSimGraphMulti + % --- + + % --- + % add an edge saying sim(a,b) > sim(a,c) + % --- + G.add_edge(clips(a), clips(b), clips(c), votes(c) / (sum(votes))); + + % --- + % every outlier vote results in two + % dissimilarity equations + % --- + + % edge 2 + G.add_edge(clips(b), clips(a), clips(c), votes(c) / (sum(votes))); + + end + + end + end + end + +end + +% end constructor function + + +function [weights, a, b, c] = similarities(G) + % --- + % returns the weights of edges meaning sim(a,b) > sim(a,c) + % --- + % get edge weights + [weights, a, b, c] = edges(G); +end + +% --- +% add an edge saying sim(a,b) > sim(a,c) +% --- +% function add_edge(G, a, b, c, weight) + +function remove_edge(G, a, b, c) + + if nargin == 4 + V1 = add_node(G, a, b); + V2 = add_node(G, a, c); + + elseif nargin == 3 + V1 = a; + V2 = b; + end + + % --- + % call superclass + % --- + remove_edge@DiGraph(G, V1, V2); + end + +function set_edge(G, a, b, c, weight) + if nargin == 5 + V1 = add_node(G, a, b); + V2 = add_node(G, a, c); + + elseif nargin == 4 + V1 = a; + V2 = b; + weight = c; + end + + % --- + % call superclass + % --- + set_edge@DiGraph(G, V1, V2, weight); +end + +% --- +% simple mix of two sets of info about the same +% nodes: mean. 
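+% (an illustrative sketch of the averaging step, assuming an existing
+% edge value w_old and a newly joined weight w_new:
+%     E(V1,V2) = (w_old + w_new) / 2 )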
+% NOTE: if there's more than two infos, the earlier +% information will loose influence +% --- +function join_edge(G, a, b, c, weight) + + if nargin == 5 + V1 = G.node(a, b); + V2 = G.node(a, c); + elseif nargin == 4 + V1 = a; + V2 = b; + weight = c; + end + + if G.edge(V1, V2) ~= 0 + + % set Edge to weight + G.E(V1, V2) = (G.E(V1, V2) + weight) /2; + + cprint(4, 'edge weight %d %d %d updated \n',a ,b , c) ; + else + + error 'nodes not found'; + end +end +end +end + + diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/ClipSimGraphMD.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/ClipSimGraphMD.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,265 @@ +% --- +% class ClipSimGraphMD +% Directed graph representing comparative clip similarities. +% EACH pair of vertices has just ONE directed edge connecting them +% +% each node represents a pair of clips, +% an edge (a,b) -> (a,c) is present if there is at least one +% users judging clip a more similar to clip b than to clip c +% +% The Edges thus represent the (are nearer to each othen than) +% expression +% --- + +classdef ClipSimGraphMD < ClipSimGraph & handle + + +properties + + % --- + % History of edge weights, is a nodes x nodes cell. + % + % E_hist(i,j) = m x 2 Matrix containing + % Pair of information for each set [votes votes_complete] + hE; % History of edge weights +end + +% --- +% the methods +% --- +methods + +% constructor of the graph +function G = ClipSimGraphMD(comparison, comparison_ids) + + if nargin == 0 + % --- + % empty constructor + % --- + + elseif nargin == 1 + % todo: copy graph + + elseif nargin == 2 + + + % --- + % handle automatic or manual input + % --- + for i = 1:size(comparison,1) + + % get clips and votes + clips = comparison_ids(comparison(i,1:3)); + votes = comparison(i,4:6); + + % for each triplet position create an edge reflecting the + % absolute and relative voting for this position + + % --- + % NOTE: we index 0 - 2 to ease the mod + % calculaton for the remaining indices + % --- + for v = 0:2 + + % --- + % has at least one user voted this clip out? 
+ % If so, treat it as an outlier and determine score + % --- + if votes(v+1) > 0 + + a = mod(v+1, 3)+1; + b = mod(v+2, 3)+1; + c = v+1; + + + % --- + % every outlier vote results in two + % dissimilarity equations, favored by + % the people who voted for the outlier + % --- + + % edge 1 + add_edge(G, clips(a), clips(b), clips(c), votes(v+1), sum(votes)); + + % edge 2 + add_edge(G, clips(b), clips(a), clips(c), votes(v+1), sum(votes)); + end + end + + end + end + +% end constructor function +end + +% adds a node stating clip a is more near % +% to clip b then clip c +function Vid = add_node(G, a, b) + + Vid = add_node@ClipSimGraph(G, a, b); + + G.hE{Vid,Vid} = []; + +end + +function remove_node(G, a, b) + + if nargin == 3 + + V1 = G.node(a, b); + elseif nargin == 2 + + V1 = a; + end + + if ~isempty(Vid) + + % --- + % look for edges connected to Vid + % and delete their histograms + % --- + G.hE(:,Vid) = []; + G.hE(Vid,:) = []; + + % --- + % TODO: Efficiently Remove the actual node from the + % GRAPH + % --- + G.Vclips(Vid,:) = []; + + end +end + +% --- +% add an edge saying sim(a,b) > sim(a,c) +% --- +function add_edge(G, a, b, c, votes, allvotes) + + V1 = add_node(G, a, b); + V2 = add_node(G, a, c); + + % --- + % save new weight values into histogram + % --- + if isempty(G.hE(V1, V2)) + + G.hE{V1,V2} = [votes allvotes]; + else + G.hE{V1,V2} = [G.hE{V1,V2}; votes allvotes]; + end + + % --- + % update Edges + % --- + G.update_E(a, b, c); + +end + +function remove_edge(G, a, b, c) + + if nargin == 4 + + V1 = G.node(a, b); + V2 = G.node(a, c); + elseif nargin == 3 + + V1 = a; + V2 = b; + end + + if ~isempty(V1) && ~isempty(V2) + + % set Edge to zero + G.hE{V1, V2} = []; + else + + error 'nodes not found'; + end +end + +% --- +% updates the edge weight given 3 clip ids or +% two nodes, based on the edges history +% +% The specified (V1,V2) and the symmetric edges' (V2,V1) weights +% are evaluated and the stronger edge will get +% the excessive weight while the loosing edge +% will be deleted +% --- +function update_E(G, a, b, c) +% update_E(G, a, b, c) +% update_E(G, V1, V2) + + % determine the type of input parameters + if nargin == 4 + + V1 = G.node(a, b); + V2 = G.node(a, c); + elseif nargin == 3 + + V1 = a; + V2 = b; + end + + % --- + % calculate weighted sum for specified edge + % and for symmetric edge + % --- + thisw = unbal_edge_weight(G, V1, V2); + symmw = unbal_edge_weight(G, V2, V1); + + % --- + % the two //competing// weights are now balanced + % --- + w = thisw - symmw; + + % --- + % set both edges + % --- + G.E(V1,V2) = max(w,0); + G.E(V2,V1) = -min(w,0); + +% if w >= 0 +% G.E(V1,V2) = w; +% G.E(V2,V1) = 0; +% elseif w < 0 +% G.E(V1,V2) = 0; +% G.E(V2,V1) = -w; +% end +end + +end + +methods (Hidden) + + % --- + % This is the core function of this Graph. 
+ % it allows for the calculation of a single Edge, + % before it is balanced with its symmetrical counteredge + % + % --- + function thisw = unbal_edge_weight(G, V1, V2) + % --- + % trivial cases + % --- + if isempty(G.hE(V1, V2)) || isempty(G.hE{V1, V2}) + thisw = 0; + return; + end + + % --- + % Evaluate the single historical entries + % and sum up + % --- + thisw = sum(G.hE{V1, V2},1); + + % --- + % now divide the single votes by the number of + % votes totally made with the option to improve + % this edge + % --- + thisw = thisw(1) / thisw(2); + + end +end +end diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/ClipSimGraphMulti.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/ClipSimGraphMulti.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,176 @@ +% --- +% class ClipSimGraphMulti +% Directed MultiGraph representing comparative clip similarities. +% EACH pair of vertices has multiple directed edges connecting them +% +% each node represents a pair of clips, +% an edge (a,b) -> (a,c) is present if there is at least one +% users judging clip a more similar to clip b than to clip c +% +% Each edge thus represent a single (are nearer to each othen than) +% expression +% --- + +classdef ClipSimGraphMulti < ClipSimGraph & handle + + +properties + +end + +% --- +% the methods +% --- +methods + +% constructor of the graph +function G = ClipSimGraphMulti(comparison, comparison_ids) + + if nargin == 0 + % --- + % empty constructor + % --- + + elseif nargin == 1 + % todo: copy graph if same class + + % --- + % this uses an imput grpaph + % to create a new MD graph + % --- + + G = ClipSimGraphMulti(); + + % --- + % Add the input graph to the empty one + % --- + G.add_graph(comparison); + + elseif nargin == 2 + + + % --- + % handle automatic or manual input + % --- + for i = 1:size(comparison,1) + + % get clips and votes + clips = comparison_ids(comparison(i,1:3)); + votes = comparison(i,4:6); + + % for each triplet position create an edge reflecting the + % absolute and relative voting for this position + + % --- + % NOTE: we index 0 - 2 to ease the mod + % calculaton for the remaining indices + % --- + for v = 0:2 + + % --- + % has at least one user voted this clip out? 
+ % If so, treat it as an outlier and determine score + % --- + if votes(v+1) > 0 + + a = mod(v+1, 3)+1; + b = mod(v+2, 3)+1; + c = v+1; + + + % --- + % every outlier vote results in two + % dissimilarity equations, favored by + % the people who voted for the outlier + % --- + + % edge 1 + add_edge(G, clips(a), clips(b), clips(c), votes(v+1)); + + % edge 2 + add_edge(G, clips(b), clips(a), clips(c), votes(v+1)); + end + end + + end + end + +% end constructor function +end + +function out = order(G) + + out = sum(G.V); +end + +function out = num_edges_multi(G) + out = sum(sum(G.E)); +end + +function out = num_edges(G) + out = sum(sum(G.E ~= 0)); +end + + + % --- +% NOTE: the weight explains the multiplicity of each +% edge, to correctily describe this, +% we just count the votes +% --- +function join_edge(G, a, b, c, weight) + + if nargin == 5 + V1 = G.node(a, b); + V2 = G.node(a, c); + elseif nargin == 4 + V1 = a; + V2 = b; + weight = c; + end + + if G.edge(V1, V2) ~= 0 + + % set add number to Edge index + G.E(V1, V2) = (G.E(V1, V2) + weight); + + cprint(3, 'edge weight %d %d %d updated \n',a ,b , c) ; + else + + error 'nodes not found'; + end +end + +% --- +% This eliminates any order-2 cycles, +% by substracting opposing edge counts +% --- +function remove_cycles_length2(G) + + G.E = max(0, G.E - G.E'); +end +% --- +% TODO: this back-and forth transform is a workaround +% avoiding some costly other transformation which would +% happen during the direct digraph call +% --- + +function [Gs, s, id] = connected_components(G, varargin) + % --- + % get all connected subgraphs: + % --- + G2 = DiGraph(G); + + [GsDiGraph, s, id] = connected_components@DiGraph(G2, varargin{:}); + + for i = 1:numel(GsDiGraph) + Gs(i) = ClipSimGraphMulti(GsDiGraph(i)); + end +end + + +end + +methods (Hidden) + +end +end diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/DistMeasureMahal.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/DistMeasureMahal.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,172 @@ +% --- +% The DistMeasureMahal class states a wrapper for +% special Mahalanobis similarity distance, and is compatible with the +% DistMeasure class +% --- +classdef DistMeasureMahal < handle + + properties (SetAccess = private) + + mahalW; + + featX; + + ids; + ret_ids; + end + + methods + + % --- + % constructor + % --- + function m = DistMeasureMahal(clips, mahalW, featX) + + if size(featX, 2) ~= numel(clips) || size(featX, 1) ~= length(mahalW) + error 'wrong input format' + end + + % fill index and generate matrix; + m.ids = [clips.id]; + + % reverse index + m.ret_ids = sparse(numel(m.ids),1); + m.ret_ids(m.ids) = 1:numel(m.ids); + + % --- + % save mahal Matrix and lazy-copy features + % --- + if size(mahalW, 1) ~= size(mahalW, 2) + + m.mahalW = diag(mahalW); + else + + m.mahalW = mahalW; + end + + m.featX = featX; + + end + + + % --- + % this compability function returns the + % mahalanobis similarity of two clip indices + % --- + function out = mat(m, idxa, idxb) + + if nargin == 1 + idxa = 1:numel(m.ids); + idxb = 1:numel(m.ids); + end + out = sqdist( m.featX(:,idxa), m.featX(:,idxb), m.mahalW); + end + + % --- + % returns the distance for the two input clips + % --- + function out = distance(m, clipa, clipb) + posa = m.get_clip_pos(clipa); + posb = m.get_clip_pos(clipb); + + out = m.mat(posa, posb); + end + + % --- + % returns a list of n (default = 10) clips most + % similar to the input + % --- + function [clips, dist] = get_nearest(m, clip, n) + % list = get_nearest(m, clip, n) + 
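+        % e.g. (an illustrative call, assuming clip is an MTTClip handle
+        % whose id is contained in m.ids):
+        %   [clips, dist] = get_nearest(m, clip, 5);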
% + % returns a list of n (default = 10) clips most + % similar to the input + + % default number of results + if nargin == 2 + + n = 10; + end + + % return all clips in case n = 0 + if n == 0; n = numel(m.ids); end + + % get clip positions + pos = m.get_clip_pos(clip); + + % sort according to distance + [sc, idx] = sort( m.mat(pos, 1:numel(m.ids)), 'ascend'); + + % we only output relevant data + idx = idx(sc < inf); + + if numel(idx) > 0 + % create clips form best ids + clips = MTTClip( m.ids( idx(1:min(n, end)))); + dist = m.mat(pos, idx(1:min(n, end))); + + else + clips = []; + dist = []; + end + end + + + + function [clips, dist] = present_nearest(m, clip, n) + % plays and shows the n best hits for a given clip + + % default number of results + if nargin == 2 + + n = 3; + end + + % get best list + [clips, dist] = get_nearest(m, clip, n); + + clip.audio_features_basicsm.visualise(); + for i = 1:numel(clips) + fprintf('\n\n\n- Rank %d, distance: %1.4f \n\n',i, dist(i)); + + clips(i).audio_features_basicsm.visualise(); + h = gcf(); + t = clips(i).play(20); + pause(t); + close(h); + end + end + + function a = visualise(m) + + figure; + + % plot data + + imagesc(m.mat); + + a = gca; + set(a,'YTick',[1:numel(m.ids)], 'YTickLabel',m.ids); + set(a,'XTick',[1:numel(m.ids)], 'XTickLabel', m.ids); + + axis xy; + colormap(hot); + end + + % end methods + end + + % --- + % private methods + % --- + methods(Access = private) + + function out = get_clip_pos(m, clip) + % returns position in mat for given clip + + out = m.ret_ids(clip.id); + end + + end + +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/DistMeasureNNet.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/DistMeasureNNet.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,179 @@ +% --- +% The DistMeasureMahal class states a wrapper for +% special Mahalanobis similarity distance, and is compatible with the +% DistMeasure class +% --- +classdef DistMeasureNNet < handle + + properties (SetAccess = private) + + net; + + featX; + + ids; + ret_ids; + end + + methods + + % --- + % constructor + % --- + function m = DistMeasureNNet(clips, net, featX) + + if size(featX, 2) ~= numel(clips) + error 'wrong input format' + end + + % fill index and generate matrix; + m.ids = [clips.id]; + + % reverse index + m.ret_ids = sparse(numel(m.ids),1); + m.ret_ids(m.ids) = 1:numel(m.ids); + + % --- + % save neural net and lazy-copy features + % --- + m.net = net; + + m.featX = featX; + + end + + + % --- + % this function returns the + % similarity of two clip indices + % --- + function out = mat(m, idxa, idxb) + + if nargin == 1 + idxa = 1:numel(m.ids); + idxb = 1:numel(m.ids); + end + + % cycle through all index combinations + out = zeros(numel(idxa), numel(idxb)); + for i = 1:numel(idxa) + for j = 1:numel(idxb) + + % calculate distance vector + deltas = m.featX(:,idxa(i)) - m.featX(:,idxb(j)); + + % return distance from net + out(i,j) = m.net.calcValue(deltas); + end + end + + end + + % --- + % returns the distance for the two input clips + % --- + function out = distance(m, clipa, clipb) + posa = m.get_clip_pos(clipa); + posb = m.get_clip_pos(clipb); + + out = m.mat(posa, posb); + end + + % --- + % returns a list of n (default = 10) clips most + % similar to the input + % --- + function [clips, dist] = get_nearest(m, clip, n) + % list = get_nearest(m, clip, n) + % + % returns a list of n (default = 10) clips most + % similar to the input + + % default number of results + if nargin == 2 + + n = 10; + end + + 
% return all clips in case n = 0 + if n == 0; n = numel(m.ids); end + + % get clip positions + pos = m.get_clip_pos(clip); + + % sort according to distance + [sc, idx] = sort( m.mat(pos, 1:numel(m.ids)), 'ascend'); + + % we only output relevant data + idx = idx(sc < inf); + + if numel(idx) > 0 + % create clips form best ids + clips = MTTClip( m.ids( idx(1:min(n, end)))); + dist = m.mat(pos, idx(1:min(n, end))); + + else + clips = []; + dist = []; + end + end + + + + function [clips, dist] = present_nearest(m, clip, n) + % plays and shows the n best hits for a given clip + + % default number of results + if nargin == 2 + + n = 3; + end + + % get best list + [clips, dist] = get_nearest(m, clip, n); + + clip.audio_features_basicsm.visualise(); + for i = 1:numel(clips) + fprintf('\n\n\n- Rank %d, distance: %1.4f \n\n',i, dist(i)); + + clips(i).audio_features_basicsm.visualise(); + h = gcf(); + t = clips(i).play(20); + pause(t); + close(h); + end + end + + function a = visualise(m) + + figure; + + % plot data + + imagesc(m.mat); + + a = gca; + set(a,'YTick',[1:numel(m.ids)], 'YTickLabel',m.ids); + set(a,'XTick',[1:numel(m.ids)], 'XTickLabel', m.ids); + + axis xy; + colormap(hot); + end + + % end methods + end + + % --- + % private methods + % --- + methods(Access = private) + + function out = get_clip_pos(m, clip) + % returns position in mat for given clip + + out = m.ret_ids(clip.id); + end + + end + +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/LFMTagsDB.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/LFMTagsDB.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,129 @@ +% --- +% This is the wrapper and loader class for Last.Fm db +% and related data +% --- + +classdef LFMTagsDB < handle + + properties + + % connects the last.fm artists to MTTartist ids + matching_artists; + + % saves the last fm tags for last.fm artist ids + tagdb; + end + + properties(Dependent) + + lexicon; + end + + + methods + function db = LFMTagsDB(artistdb) + + % load data + load last.fm.mat; + + % --- + % NOTE: here, we save the corresponding magnatagatune + % artists. There make sure that the last.fm artists + % are properly associated! + % + % finally, the central last.fm artist index will refer + % to both the MTT artist database and the tags + % ---- + + % get all the clips artists + artists = artistdb.lexicon; + + % assign artists to Last.Fm artists + lexicon = []; + annots = []; + + justfmartists = fmartists(:,2); + for i = 1:numel(justfmartists) + + %get matching MTT for each Last.fm artist + pos = strcellfind(artists, justfmartists{i}); + + % save its id + if ~isempty(pos) + associds(i) = artistdb.get_annot_id(artists{pos}); + end + end + + % --- + % TODO: assert wheather if the fmartist id actually coincide + % justfmartists_ids = matching_artists.get_annotid(justfmartists); + % --- + + % call superclass constructor + db.tagdb = AnnotDB(fmartist_annots_names, ... + fmartist_annots, associds); + + % save the mathing artists + db.matching_artists = AnnotDB('clips_by_annot', ... 
+ justfmartists, associds); + + end + end + + + methods + + % retrieve annot by clip + function [out, score, aid] = annots(db, clip) + + % --- + % NOTE: we retrieve the tags for the artist matching the clips + % one + % --- + + fma_id = db.matching_artists.annotids_for_owner(clip.artist_id()); + + artist = clip.artist(); + + if ~isempty(fma_id) + + [out, score, aid] = db.tagdb.annots(fma_id); + else + warning('Artist %s not found in Last.fm tags DB', artist); + end + end + + function out = annotids_for_owner(db, clip) + + % --- + % NOTE: we retrieve the tags for the artist matching the clips + % one + % --- + + fma_id = db.matching_artists.annotids_for_owner(clip.artist_id()); + + artist = clip.artist(); + + if ~isempty(fma_id) + + out = db.tagdb.annotids_for_owner( fma_id ); + else + warning('Artist %s not found in Last.fm tags DB', artist); + end + end + + function out = get.lexicon(db) + % gives back all the tags + + out = db.tagdb.lexicon; + end + end + + methods(Static) + + + end +end + + + \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/MTTAudioFeature.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/MTTAudioFeature.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,362 @@ +% --- +% Mother of allMTT Audio Features +% --- + +classdef MTTAudioFeature < handle + + % common properties + properties (Dependent) + + data; + end + + properties (Hidden) + + % --- + % TODO: get clip features by id and not by pos + % --- + + % position in db + db_pos; + end + + % do not save whole db into mat file + properties (Hidden, Transient) + + % database + my_db; + end + + + % common methods + methods + + % --- + % constructor: pointer to feature in database + % --- + function feature = MTTAudioFeature(db, varargin) + % feature = MTTAudioFeatureBasicSm(db, varargin) + + if nargin >= 1 + % save database handle; + feature.my_db = db; + + if nargin >= 2 + if isnumeric(varargin{1}) + + feature.db_pos = varargin{1}; + + % --- + % read parameters from data, which should not be + % overwritten by the manual imputs ? 
+ % --- + feature.set_params(feature.data.info.params); + + if numel(varargin) > 1 + warning('Specified parameters cant be applied with preexisting data'); + end + + else + + % set parameters for feature extraction + feature.set_params(varargin{:}); + end + end + + else + % --- + % TODO: empty constructor + % --- + end + end + + % --- + % get and set methods, as the lazy matlab referencing + % is of no use when the feature has to be adjusted + % during the progess of finalising + % --- + function data = get.data(feature) + + data = feature.my_db.featuredb(feature.db_pos); + end + + function set.data(feature, data) + + feature.my_db.featuredb(feature.db_pos) = data; + end + + function id = owner_id(feature) + % returns associated clip(s) + + for i = 1:numel(feature) + id(i) = feature(i).data.info.owner_id; + end + end + + function out = dim(feature) + % returns dimension of single feature vector + + out = feature(1).data.final.dim; + end + + function label = labels(features) + label = []; + if isfield(features(1).data.final, 'vector_info') + + % --- + % TODO: these labels should be stored at a central point, + % as this is a big waste of memory + % --- + label = features(1).data.final.vector_info.labels; + end + end + + function out = vector(features) + % returns the feature vector(s) + % if multiple features are input, the vectors are + % concatenated to a single matrix of size + % (feature.dim x numel(features) + + % finalise features if not done yet + if ~isfield(features(1).data, 'final') || features(1).data.final.dim == 0 + + features.finalise(); + end + + %shortcut for empty features + if features(1).data.final.dim < 1 + out =[]; + return + end + + out = zeros(features(1).data.final.dim, numel(features)); + for i = 1:numel(features) + + % finalise single feature vectors + if ~isfield(features(i).data, 'final') || features(i).data.final.dim == 0 + + features(i).finalise(); + end + + out(:,i) = features(i).data.final.vector; + end + end + + function out = common(feature) + % returns common feature values and params + + out = feature(1).my_db.commondb; + end + + % visualises all the finalised feature vectors at once + function a1 = visualise_vectors(features) + + % Collect source description data + ids = zeros( numel( features, 1)); + + for i = 1: numel(features) + + ids(i) = features(i).owner_id(); + end +% clips = MTTClip(ids); + + % get collective feature data + vec = features.vector(); + + % feature description data + labels = features(1).labels; + + % plot data + h = figure; + imagesc(vec); + a1 = gca(); + axis xy; + + set(a1,'XTick',1:numel(features), 'XTickLabel', ids); + + if ~isempty(labels) + set(a1,'YTick', [1:features(1).data.final.dim], 'YTickLabel', labels); + end + + xlabel('Clip ID'); + end + + + % --- + % compares the params of the feature with the + % provided param struct or feature + % --- + function out = eq_params(this, in) + % function out = eq_params(this, in) + % + % resolves if a feature with params(in) would be + % the same as the given feature + + type = this.my_db.featuredb_type; + + % --- + % construct dummy feature to get the params parsing right + % --- + if isstruct(in) + + % call constructor for this feature + % type without valid db + in = feval(type, 0, in); + end + + % return whether the strings are equal or not + out = strcmp(this.param_hash(), in.param_hash()); + end + + function unused = set_params(this, varargin) + % copies the parameters within the struct to the featrue instance, + % or + % uses the process_options function to set the parameters. 
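+            % e.g. (an illustrative call, assuming a subclass whose
+            % my_params defines these fields, such as MTTAudioFeatureBasicSm):
+            %   feature.set_params('nchromas', 8, 'chroma_var', 1);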
+ % this is used for initialisation / constructor of features + + unused = {}; + if numel(this) > 1 + + for i = 1:numel(this) + set_params(this(i), varargin{:}); + end + else + + if isstruct(varargin{1}) + + % ok, just copy the information + % this.my_params = varargin{1}; + + % --- + % NOTE: this is a compability script + % set fields succesively to keep new / nonset field names + % we do not add parameters!! + % --- + fields = fieldnames(varargin{1}); + for i = 1:numel(fields) + + % test whether the parameter is supported + if isfield(this.my_params, fields{i}) + + this.my_params.(fields{i}) = varargin{1}.(fields{i}); + end + end + else + + % get all parameter lines + fields = fieldnames(this.my_params); + + outputstring = ''; + inputstring = ''; + + % we collect the string for all the fields + for i = 1:numel(fields) + + % generate left- hand side of param collection + outputstring = sprintf('%s this.my_params.%s',... + outputstring, fields{i}); + + % right-hand-side + inputstring = sprintf('%s ''%s'', this.my_params.%s',... + inputstring, fields{i}, fields{i}); + + if i < numel(fields) + + % put comma behind last argument + inputstring = sprintf('%s,',inputstring); + outputstring = sprintf('%s,',outputstring); + end + + end + + % evaluate set + eval(sprintf('[%s, unused] = process_options(varargin, %s);', ... + outputstring, inputstring)); + end + end + end %fun + + % --- + % saveto function + % --- + function saveto(features, matfile) + % saves (multiple features to a .mat file) + + % get clip ids + c = zeros(numel(features), 1); + for i = 1:numel(features) + c(i) = features(i).owner_id(); + end + + % build clips + clips = MTTClip(c); + + % let db export the corresponding clips + features(1).my_db.export(matfile, clips); + end + + % --- + % destructor: do we really want to remove this + % from the database? No, but + % TODO: create marker for unused objects in db, and a cleanup + % function + % --- + function delete(feature) + + end + end + + methods (Hidden = true) + + function assign(feature, db_pos) + % sets the feature data link + + feature.db_pos = db_pos; + end + end + + methods (Static) + + function ph = param_hash(type, varargin) + % loads the params for a feature type and adds the + % given parameter values to it. + + % this function can be static or dynamic + if nargin > 1 + % static case + dummy = feval(type,[], varargin{:}); + else + % dynamic case + dummy = type; + end + ph = hash(xml_format(dummy.my_params),'MD5'); + end + + function params = inherited_params(type, varargin) + % loads the params for a feature type and adds the + % given parameter values to it. 
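+        % e.g. (an illustrative call; the type name and parameter fields
+        % refer to the MTTAudioFeatureBasicSm class and its my_params):
+        %   params = MTTAudioFeature.inherited_params(...
+        %       'MTTAudioFeatureBasicSm', 'nchromas', 8, 'chroma_var', 1);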
+ + dummy = feval(type); + + params = dummy.my_params; + + % basic check if the input is correct + if mod(numel(varargin), 2) ~= 0 + error('number of params does not match number of values'); + end + + % add the fields to the struct + for i = 1:2:(numel(varargin)-1); + + params.(varargin{i}) = varargin{i+1}; + end + end + end +end + + + + + + + \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/MTTAudioFeatureBasicSm.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/MTTAudioFeatureBasicSm.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,805 @@ +classdef MTTAudioFeatureBasicSm < MTTAudioFeature & handle + % --- + % the MTTAudioFeatureBasicSm Class contains + % a basic summary of chroma, mfcc and tempo features + % a few common chroma and mfcc vectors are concatenated + % along with some clip-wide variance + % a metric / rhythm fingerprint is added + % + % The usual workflow for these features consists of three steps + % 1. extract: extracts the basic single-file dependent features + % 2. define_global_transform: calculates the global feature + % transformation parameters + % 3. finalise: applies the common transformations to a specific feature + % --- + + properties(Constant = true) + + % svn hook + my_revision = str2double(substr('$Rev$', 5, -1)); + end + + properties + % --- + % Set default parameters + % --- + my_params = struct(... + 'nchromas', 4, ... % 4 chroma vectors + 'chroma_var', 0, ... % chroma variance + 'norm_chromas', 0, ... % not implemented, chromas already rel. + 'min_kshift_chromas', 0.1, ... % treshold for key shift. set to 1 for no shift (0-1) + ... + 'ntimbres', 4, ... + 'timbre_var', 0, ... % timbre variance + 'norm_timbres', 1, ... + 'clip_timbres', 0.85, ... % percentile of data which has to be inside 0-1 bounds + ... + 'norm_weights',0, ... % globally norm weights for chroma times? + 'norm_interval',1, ... + 'max_iter',100, ... % max iterations for chroma and timbre knn + ... + 'nrhythms', 0, ... + 'nints', 11, ... + 'energy_sr', 1000, ... % sample rate for energy curve + 'norm_acorr', 1 ... % normalise arcorr locally-> shape imp... energy is normalised anyways + ); + end + + % --- + % member functions + % --- + methods + + % --- + % constructor: pointer to feature in database + % --- + function feature = MTTAudioFeatureBasicSm(varargin) + + feature = feature@MTTAudioFeature(varargin{:}); + + end + % --- + % extract feature data from raw audio features + % --- + function data = extract(feature, clip) + % --- + % get Basic Summary audio features. this includes possible + % local normalisations + % --- + + global globalvars; + + rawf = clip.audio_features_raw(); + + % --- + % now extract the features + % first step: chroma clustering + % --- + weights = [rawf.data.segments(:).duration]; + + % normalise weights + weights = weights / rawf.data.duration; + + chroma = [rawf.data.segments(:).pitches]'; + + % --- + % get most present chroma vectors. 
+ % the weighted k-means should return the four most prominent + % chroma vectors and their weight + % --- + % display error values + + op = foptions(); + op(1) = 0; + op(14) = feature.my_params.max_iter; + + % check for trivial case + if feature.my_params.nchromas == 0 + + chromas = []; + cwght = []; + + elseif feature.my_params.nchromas == 1 + + chromas = mean(chroma, 1); + chroma_var = var(chroma, 0, 1); + cwght = 1; + + elseif numel(weights) > feature.my_params.nchromas + + % --- + % there may be few chromas, try kmeans several (20) times + % --- + cont = 0; + cwght = []; + while (numel(cwght) ~= feature.my_params.nchromas) && (cont < 20); + + [chromas, cwght, post] = ... + weighted_kmeans(feature.my_params.nchromas, chroma, weights, op); + + cont = cont + 1; + end + + if (numel(cwght) ~= feature.my_params.nchromas) + + error('cannot find enough chroma centres'); + end + + % --- + % Calculate the weighted variance of the chroma clusters + % --- + if feature.my_params.chroma_var >= 1 + + chroma_var = zeros(size(chromas)); + for i = 1:size(chroma_var,1) + + % get distance from cluster centroid + tmp_var = (chroma(post(:,i),:) - repmat(chromas(i,:), sum(post(:,i)),1)).^2; + + % add up the weighted differences and normalise by sum + % of weights + chroma_var(i,:) = (weights(post(:,i)) * tmp_var) ./... + (sum(weights(post(:,i)))); + end + end + else + % --- + % odd case: less than nchroma data points. + % we repeat the mean vector at the end + % --- + chromas = [chroma; repmat(mean(chroma, 1),... + feature.my_params.nchromas - numel(weights), 1 )]; + + cwght = weights; + cwght( end + 1:feature.my_params.nchromas ) = 0; + + % --- + % TODO: get a variance for odd case : + % replicate the complete data variance? + % NO: every vector is a clsuter => zero variance + % --- + end + + % trivial case: no variance requested + if ~exist('chroma_var','var') + chroma_var = zeros(size(chromas)); + end + + % sort by associated time + [cwght, idx] = sort(cwght, 'descend'); + chromas = chromas(idx,:); + chroma_var = chroma_var(idx,:); + + % --- + % shift according to detected key, but only if + % the confidencee is high enough + % --- + shift = 0; + if rawf.data.keyConfidence > feature.my_params.min_kshift_chromas; + + shift = -rawf.data.key; + chromas = circshift(chromas, [0 shift]); + chroma_var = circshift(chroma_var, [0 shift]); + end + + % --- + % get mfcc centres: + % the same for mfccs + % --- + mfcc = [rawf.data.segments(:).timbre]'; + if feature.my_params.ntimbres == 0 + + mfccs = []; + mwght = []; + + elseif feature.my_params.ntimbres == 1 + + mfccs = mean(mfcc, 1); + timbre_var = var(mfccs, 0, 1); + mwght = 1; + + elseif numel(weights) > feature.my_params.ntimbres + + % --- + % there may be few mfccs, try kmeans several times + % --- + cont = 0; + mwght = []; + while (numel(mwght) ~= feature.my_params.ntimbres) && (cont < 20); + + [mfccs, mwght, post] = ... + weighted_kmeans(feature.my_params.ntimbres, mfcc, weights, op); + cont = cont + 1; + end + + if (numel(mwght) ~= feature.my_params.ntimbres) + + error('cannot find enough mfcc centres'); + end + + % --- + % Calculate the weighted variance of the chroma clusters + % --- + if feature.my_params.timbre_var >= 1 + + timbre_var = zeros(size(mfccs)); + for i = 1:size(timbre_var,1) + + % get distance from cluster centroid + tmp_var = (mfcc(post(:,i),:) - repmat(mfccs(i,:), sum(post(:,i)),1)).^2; + + % add up the weighted differences and normalise by sum + % of weights + timbre_var(i,:) = (weights(post(:,i)) * tmp_var) ./... 
+ (sum(weights(post(:,i)))); + end + end + + else + % --- + % odd case: less than nchroma data points. + % we repeat the mean vector at the end + % --- + mfccs = [mfcc; repmat(mean(mfcc, 1),... + feature.my_params.ntimbres - numel(weights), 1)]; + mwght = weights; + mwght( end + 1:feature.my_params.ntimbres) = 0; + end + + % trivial case: no variance requested + if ~exist('timbre_var','var') + timbre_var = zeros(size(mfccs)); + end + + % sort by associated time + [mwght, idx] = sort(mwght, 'descend'); + mfccs = mfccs(idx,:); + timbre_var = timbre_var(idx,:); + + % --- + % get beat features: + % the autocorrelation curve over n quarters of length + % + % alternative: how about using the n=8 quarters relative + % volumes from the start of a sure measure? + % --- + if feature.my_params.nrhythms >= 1 + [bars, beats, tatums] = rawf.infer_bar_times(); + % --- + % NOTE: the beat and tatum markers seem to have an offset :( + % --- + offset = 0.118; %seconds + + [envelope, time] = energy_envelope(feature, clip); + + % we offset the energy curve + time = time + offset; + + % --- + % we try to start at the best beat confidence more + % than sixteen eights from the end + % --- + + if rawf.data.tempo > 0 + + eightl = 30 / rawf.data.tempo; + else + % --- + % odd case: no rhythm data. assume 100 bpm + % --- + + eightl = 0.3; + end + + if isempty(beats) + % --- + % odd case: no beats detected. -> use best tatum + % --- + if ~isempty(tatums) + + beats = tatums; + else + + % ok, just take the beginning + beats = [0; 1]; + end + end + + last_valid = find(beats(1,:) < ... + (rawf.data.duration - feature.my_params.nints * eightl),1, 'last'); + + % find the best valid beat postition + [null, max_measure] = max( beats(2, 1:last_valid)); + max_mtime = beats(1,max_measure); + + % --- + % the correlation is calculated for the estimated eights lenght + % and for the 16th intervals, respectively. + % --- + + % calculate the EIGHTS correlation for the following segment + [acorr8, eight_en, eightt] = ... + beat_histogram(feature, max_mtime, eightl, envelope, time); + + % calculate the SIXTEENTHS correlation for the following segment + [acorr16, six_en, sixt] = ... 
+ beat_histogram(feature, max_mtime, eightl / 2, envelope, time); + + % --- + % save the various features + % --- + % save rythm feature data + + data.rhythm.acorr8 = acorr8; + data.rhythm.acorr8_lag = eightt(1:end/2)-eightt(1); + + data.rhythm.energy8 = eight_en(1:end/2); + data.rhythm.energy8_time = eightt(1:end/2); + + % -- + % the interval is normed locally up to a max value + % associated to 30bpm + % --- + if feature.my_params.norm_interval + + % 1 second max value + data.rhythm.interval8 = eightl / 2; + else + data.rhythm.interval8 = eightl / 2; + end + + if feature.my_params.nrhythms >= 2 + + data.rhythm.acorr16 = acorr16; + data.rhythm.acorr16_lag = data.rhythm.acorr8_lag / 2; + + data.rhythm.energy16 = six_en(1:end/2); + data.rhythm.energy16_time = sixt(1:end/2); + + + % save beat interval / tempo + if feature.my_params.norm_interval + + % 1 second max value + data.rhythm.interval16 = eightl / 2; + else + data.rhythm.interval16 = eightl / 2; + end + + end + else + +% % save empty rythm struct +% data.rhythm = struct([]); + end + + % chroma feature data + for i = 1:size(chromas,1) + data.chroma(i).means = chromas(i,:)'; + data.chroma(i).means_weight = cwght(i); + data.chroma(i).vars = chroma_var(i,:)'; + data.chroma(i).shift = shift; + end + + % mfcc feature data + for i = 1:size(mfccs,1) + data.timbre(i).means = mfccs(i,:)'; + data.timbre(i).means_weight = mwght(i); + data.timbre(i).vars = timbre_var(i,:)'; + end + + % prepare field for final features + data.final.vector = []; + data.final.vector_info = struct(); + data.final.dim = 0; + + % save info data + data.info.type = 'MTTAudioFeatureBasicSm'; + data.info.owner_id = clip.id; + data.info.creatorrev = feature.my_revision; + + % save parameters + data.info.params = feature.my_params; + end + + function define_global_transform(features) + % calculate and set normalization factors from the group of + % input features. These features will be set for the full database + + if numel(features) == 1 + error ('Insert feature array for this method'); + end + + % --- + % here, we only need to define the post-normalisation + % --- + + % --- + % get chroma variance data NORMALISATION Factors + % TODO: transport chroma variance to finalise step + % --- + if features(1).my_params.chroma_var >= 1 + allfeat = abs(cat(2, features(1).data.chroma(:).vars)); + for i = 2:numel(features) + + allfeat = cat(2 , allfeat, abs(abs(cat(2, features(i).data.chroma(:).vars)))); + end + [~, common.post_normf.chroma_var] = mapminmax(allfeat,0,1); + end + + % --- + % get timbre variance data NORMALISATION Factors + % TODO: transport chroma variance to finalise step + % --- + if features(1).my_params.timbre_var >= 1 + allfeat = abs(cat(2, features(1).data.timbre(:).vars)); + for i = 2:numel(features) + + allfeat = cat(2 , allfeat, abs(abs(cat(2, features(i).data.timbre(:).vars)))); + end + [~, common.post_normf.timbre_var] = mapminmax(allfeat,0,1); + end + + % --- + % derive normalisation for timbre features: + % MFCC's are actually special filter outputs + % (see developer.echonest.com/docs/v4/_static/AnalyzeDocumentation_2.2.pdf + % they are unbounded, so just the relative information will be + % used here. 
+ % We normalise each bin independently + % --- + if features(1).my_params.ntimbres > 0 + + allfeat = abs(cat(2, features(1).data.timbre(:).means)); + for i = 2:numel(features) + + allfeat = cat(2 , allfeat, abs(cat(2, features(i).data.timbre(:).means))); + end + + % --- + % get normalisation factors + % NOTE: the values will later be clipped to [0,1] + % anyways + % --- + if (features(1).my_params.clip_timbres ~= 0 ) || ... + (features(1).my_params.clip_timbres ~= 1 ) + + common.post_normf.timbre = 1 ./ prctile(allfeat, features(1).my_params.clip_timbres * 100, 2); + + else + % just use the maximum + common.post_normf.timbre = 1/max(allfeat, 2); + end + + % set common feature values + features(1).my_db.set_common(common); + + else + + features(1).my_db.set_common([1]); + end + end + + + function finalise(feature) + % applies a final transformation and + % collects the information of this feature within a single vector + % see info for types in specific dimensions + + for i = 1:numel(feature) + + % check for neccesary parameters + if isempty(feature(i).my_db.commondb) + + error('Define the global transformation first') + return; + end + + if feature(1).my_params.ntimbres > 0 + % --- + % normalise features + % --- + % norm timbre features if neccesary + timbren = []; + if feature(i).my_params.norm_timbres + for j = 1:numel(feature(i).data.timbre) + + timbren = cat(1, timbren, ... + MTTAudioFeatureBasicSm.norm_timbre... + (feature(i).data.timbre(j).means, feature(i).my_db.commondb.post_normf.timbre)); + end + else + + timbren = cat(1, timbren, feature(i).data.timbre(:).means); + end + end + + % --- + % construct resulting feature vector out of features + % --- + vec = []; + info = {}; + if feature(i).my_params.nchromas > 0 + + info{numel(vec)+ 1} = 'chroma'; + vec = cat(1, vec, feature(i).data.chroma(:).means); + + info{numel(vec)+ 1} = 'chroma weights'; + vec = cat(1, vec, [feature(i).data.chroma(:).means_weight]'); + + % --- + % NORMALISE Chroma variance + % --- + if feature(i).my_params.chroma_var >= 1 + + info{numel(vec)+ 1} = 'chroma variance'; + + % normalise this pack of variance vectors + tmp_var = mapminmax('apply', [feature(i).data.chroma(:).vars],... + feature(i).common.post_normf.chroma_var); + + % concatenate normalised data to vector + for vari = 1:size(tmp_var,2) + + vec = cat(1, vec, tmp_var(:, vari)); + end + end + end + + + if feature(i).my_params.ntimbres > 0 + + info{numel(vec)+ 1} = 'timbre'; + vec = cat(1, vec, timbren); + + info{numel(vec)+ 1} = 'timbre weights'; + vec = cat(1, vec, [feature(i).data.timbre(:).means_weight]'); + + % --- + % NORMALISE timbre variance + % --- + if feature(i).my_params.timbre_var >= 1 + + info{numel(vec)+ 1} = 'timbre variance'; + + % normalise this pack of variance vectors + tmp_var = mapminmax('apply', [feature(i).data.timbre(:).vars],... 
+ feature(i).common.post_normf.timbre_var); + + % concatenate normalised data to vector + for vari = 1:size(tmp_var,2) + + vec = cat(1, vec, tmp_var(:, vari)); + end + end + end + + if feature(i).my_params.nrhythms > 0 + + info{numel(vec)+ 1} = 'rhythm 8'; + vec = cat(1, vec, feature(i).data.rhythm.acorr8); + + info{numel(vec)+ 1} = 'int 8'; + vec = cat(1, vec, feature(i).data.rhythm.interval8); + + if feature(i).my_params.nrhythms >= 2 + + info{numel(vec)+ 1} = 'rhythm 16'; + vec = cat(1, vec, feature(i).data.rhythm.acorr16); + + info{numel(vec)+ 1} = 'int 16'; + vec = cat(1, vec, feature(i).data.rhythm.interval16); + end + end + + feature(i).data.final.vector = vec; + feature(i).data.final.dim = numel(feature(i).data.final.vector); + + % fill up info struct and append to feature + + info(end+1: feature(i).data.final.dim) = ... + cell(feature(i).data.final.dim - numel(info),1); + + feature(i).data.final.vector_info.labels = info; + end + + % --- + % TODO: Maybe delete more basic features again at this point? + % --- + end + + % --- + % destructor: do we really want to remove this + % from the database? No, but + % TODO: create marker for unused objects in db, and a cleanup + % function + % --- + function delete(feature) + + end + + + function visualise(feature) + % --- + % plots the different data types collected in this feature + % --- + for i = 1:numel(feature) + clip = MTTClip(feature(i).owner_id()); + + % display raw features + rawf = clip.audio_features_raw(); + + [a1, a2, a3] = rawf.visualise(); + + % --- + % Display chroma features + % --- + if isfield(feature(i).data, 'chroma') + + chroma_labels = {'c', 'c#', 'd','d#', 'e', 'f','f#', 'g','g#', 'a', 'a#', 'h'}; + mode_labels = {'minor', 'major'}; + + % change labels to reflect detected mode + chroma_labels{rawf.data.key + 1} = ... + sprintf('(%s) %s',mode_labels{rawf.data.mode + 1}, chroma_labels{rawf.data.key + 1}); + + % transpose labels and data + chroma_labels = circshift(chroma_labels, [0, feature(i).data.chroma(1).shift]); + chromar = circshift([rawf.data.segments(:).pitches], [feature(i).data.chroma(1).shift, 0]); + + % image transposed chromas again + segments = [rawf.data.segments(:).start]; + segments(end) = rawf.data.duration; + + hold(a1); + uimagesc(segments, 0:11, chromar, 'Parent', a1); + set(a1,'YTick',[0:11], 'YTickLabel', chroma_labels); + + % enlarge plot and plot new data after the old ones + ax = axis(a1); + ax(2) = ax(2) + 2*feature(i).my_params.nchromas + 0.5; + axis(a1, 'xy'); + axis(a1, ax); + + imagesc(rawf.data.duration + (1:feature(i).my_params.nchromas), (-1:11), ... + [ feature(i).data.chroma(:).means_weight; feature(i).data.chroma(:).means],... + 'Parent', a1); + % variance calculated? + if isfield(feature(i).data.chroma, 'vars') + + imagesc(rawf.data.duration + feature(i).my_params.nchromas + (1:feature(i).my_params.nchromas), (-1:11), ... + [feature(i).data.chroma(:).vars],... + 'Parent', a1); + end + end + + % --- + % Display timbre features + % --- + if isfield(feature(i).data, 'timbre') + + % enlarge plot and plot new data after the old ones + hold(a2); + ax = axis(a2); + ax(2) = ax(2) + 2*feature(i).my_params.ntimbres + 0.5; + + axis(a2, ax); + imagesc(rawf.data.duration + (1:feature(i).my_params.ntimbres), (-1:11), ... + [ feature(i).data.timbre(:).means_weight; feature(i).data.timbre(:).means],... + 'Parent', a2); + if isfield(feature(i).data.timbre, 'vars') + + imagesc(rawf.data.duration + feature(i).my_params.ntimbres + (1:feature(i).my_params.ntimbres), (-1:11), ... 
+ [feature(i).data.timbre(:).vars],... + 'Parent', a1); + end + end + + % --- + % Display rhythm features + % --- + if isfield(feature(i).data, 'rhythm') + % data.rhythm.interval + % get timecode + eightt = feature(i).data.rhythm.energy8_time; + sixt = feature(i).data.rhythm.energy16_time; + + hold(a3); + % plot sixteens acorr and energy + plot(sixt, feature(i).data.rhythm.energy16, 'bx') + + plot(sixt, feature(i).data.rhythm.acorr16, 'b') + + % plot eights acorr and energy + plot(eightt, feature(i).data.rhythm.energy8, 'rx') + + plot(eightt, feature(i).data.rhythm.acorr8, 'r') + + % broaden view by fixed 4 seconds + ax = axis(a3); + axis(a3, [max(0, eightt(1)-( eightt(end) - eightt(1) + 4 )) ... + min(rawf.data.duration, eightt(end) +4) ... + ax(3:4)]); + end + end + end + end + + + methods (Hidden = true) + + function [env, time] = energy_envelope(feature, clip) + % extracts the envelope of energy for the given clip + + % --- + % TODO: externalise envelope etc in external audio features + % --- + + [null, src] = evalc('miraudio(clip.mp3file_full())'); + [null, env] = evalc('mirenvelope(src, ''Sampling'', feature.my_params.energy_sr)'); + + time = get(env,'Time'); + time = time{1}{1}; + env = mirgetdata(env); + end + + function [acorr, base_sig, base_t] = beat_histogram(feature, startt, interval, signal, signal_t) + % acorr = beat_histogram(feature, startt, interval, signal, time) + % + % compute correlation for beats of specified length in energy curve + + % get corresponding energy values + dt = signal_t(2) - signal_t(1); + base_t = startt:interval:(startt + (feature.my_params.nints*2-1) * interval); + base_sig = signal( min( numel(signal), max(1,round((base_t - signal_t(1))/dt)))); + + % normalise energy + acbase_sig = base_sig./max(base_sig); + + % calculate their cyclic autocorrelation + acorr = circshift(xcorr(acbase_sig,acbase_sig(1:end/2)),... + [numel(acbase_sig) 0]); + + % cut acorr to relevant points, normalise and square + acorr = (acorr(1:feature.my_params.nints)./feature.my_params.nints).^2; + + % --- + % NOTE: we normalise the autocorrelation locally, to compare the + % (rhythmic) shape + % --- + if feature.my_params.norm_acorr; + + acorr = acorr - min(acorr); + acorr = acorr/max(acorr); + end + end + end + + methods(Static) + + function timbre = norm_timbre(in, normfs) + % returns normed timbre data + + % --- + % individually scale the data using + % the dimensions factors + % --- + timbre = zeros(size(in)); + for i = 1:size(in,2) + + timbre(:,i) = normfs .* in(:,i); + end + + % shift to positive values + timbre = (1 + timbre) /2; + + % clip features to [0,1] + timbre = min(1, max(timbre, 0)); + end + + % --- + % returns parameter md5 hash for comparison + % --- + end + +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/MTTAudioFeatureDBgen.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/MTTAudioFeatureDBgen.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,425 @@ +classdef MTTAudioFeatureDBgen < handle + + % --- + % the database is stored as a global variable + % --- + + properties (Hidden) + + % feature databases + featuredb; + + commondb; + + featuredb_type; + end + + + properties (Hidden) + + % --- + % We use a "db_magnaaudiofeat_xxx" class pointer for all the loaded features, + % which is supposed to work like a cache. + % my_dbpos links to the position of this feature in the db. 
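+ % ---
+ % Sketch (added for illustration, not part of the original notes):
+ % featuredb_hash acts as the cache index. It stores, at each feature
+ % position, the id of the owning clip, so a cache lookup is a find:
+ %
+ %   db.featuredb_hash = [42 57 99];       % hypothetical clip ids
+ %   pos = find(db.featuredb_hash == 57);  % -> 2, cf. get_clip_pos()
+ %   pos = find(db.featuredb_hash == 11);  % -> empty, mapped to 0
+ %
+ % and a newly extracted clip is appended at numel(db.featuredb_hash) + 1,
+ % cf. get_next_pos() below.
+ % ---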
+ % --- + + % database hash + featuredb_hash; + end + + % --- + % member functions + % --- + methods + + % constructor + function db = MTTAudioFeatureDBgen(type, matfile) + + if nargin >= 1 + + % --- + % we test for correct type by + % using an call to the empty arg constructor function + % --- + try + feval(type); + + catch err + fprintf('%s\n', err.message); + error('The specified class does not provide an constructor') + end + db.featuredb_type = type; + + if nargin >=2 && ~isempty(dir(matfile)) + + + % import database if filename given + db.import(matfile); + end + + end + + % try to load a standard db from mat file? + end + + % --- + % database retrieval. + % this should return a handle to a feature class pointing in this + % global feature database. + % + % parameters can be passed via the varargin parameter + % --- + function features = get_features(db, clip, varargin) + + % --- + % TODO: error checking and processing + % --- + + % --- + % prepare for multiple clips + % --- + if numel(clip) > 1 + + % iterate for all the found clips + features = feval(db.featuredb_type); + for i = 1:numel(clip) + features(i) = get_features(db, clip(i), varargin{:}); + end + else + % --- + % single feature extraction + % --- + + pos = db.get_clip_pos(clip); + if ~pos + + % assign new database position + pos = db.get_next_pos(); + + % call feature constructor + features = feval(db.featuredb_type, db, varargin{:}); + + % extract / load feature data + + cprint(2 ,'Extracting %s for clip %d \n', db.featuredb_type, clip.id()); + + if isempty( db.featuredb) + + db.featuredb = features.extract(clip); + else + + db.featuredb(pos) = features.extract(clip); + end + % --- + % NOTE: + % IF WE ARE SURE THAT EVERYTHING HAS WORKED OUT FINE: + % assign new database position in cache + % --- + db.set_pos(pos, clip); + + % --- + % NOTE: feature objects are directly linked to DB + % positions + % --- + features.assign(pos); + else + + % just return the cached link + features = feval(db.featuredb_type, db, pos); + end + + % --- + % finalise features if possible (commondb not empty) + % --- + if ~isempty(db.commondb) && isempty(features.data.final) && ... 
+ ismethod(features,'finalise') + + % call finalise + features.finalise(); + end + + end + end + + function set_common(db, common) + % sets the common database to input + + if isempty(db.commondb) + + cprint(2, 'Setting common feature values\n'); + else + + cprint(1, 'Common feature values changed'); + end + + db.commondb = common; + end + + function export(db, matfile, clips) + % saves featuredb to matlab data file + + global globalvars; + % save revision for later version and compability control + info.creatorrev = globalvars.camir.revision; + + cprint(2, 'Exporting %s database to %s ...\n', db.featuredb_type, matfile); + if nargin == 3 + % --- + % TODO: create new hash + % --- + for i = 1:numel(clips) + pos(i) = db.get_clip_pos(clips(i)); + + if pos(i) == 0 + error('Corrupted database'); + end + end + + % get specific data set + featuredb = db.featuredb(pos); + + featuredb_hash = db.featuredb_hash(pos); + + else + featuredb = db.featuredb; + + featuredb_hash = db.featuredb_hash; + end + + commondb = db.commondb; + + featuredb_type = db.featuredb_type; + + save(matfile, 'featuredb', 'commondb', 'featuredb_type', 'featuredb_hash'); + end + + function [features, clips] = import(db, matfile, type) + % saves featuredb to matlab data file + + cprint(2, 'importing features from %s', matfile) + load(matfile,'-MAT'); + + if ~strcmp(featuredb_type, db.featuredb_type) + + error('feature type of db to import does not match'); + end + + % --- + % TODO / FIXME: check parameter hash before importing + % --- + +% if db.size() > 0 +% +% % get a feature param from the db; +% last_pos = db.get_last_pos; +% dummyparams = db.featuredb(last_pos).info.params; +% +% % --- +% % construct a dummy feature and compare parameters to +% % the params in the database +% % --- +% dummyclip = MTTClip(db.featuredb_hash(last_pos)); +% dummyfeat = db.get_features(dummyclip, dummyparams); +% +% if ~dummybsm.eq_params(fparams(i)) +% +% db_magnaaudiofeat_basicsm.reset; +% end +% +% end + + + clips = MTTClip(featuredb_hash); + + % --- + % import features individually into db + % --- + + for i = 1:numel(clips) + + % test if database already contains clip + if ~db.get_clip_pos(clips(i)); + + % get position for this database + pos = db.get_next_pos(); + + % copy values + if ~isempty(db.featuredb) + + db.featuredb(pos) = featuredb(i); + elseif pos == 1; + + db.featuredb = featuredb(i); + else + + error ('Corrupted database'); + end + % update hash + db.set_pos(pos, clips(i)); + end + end + +% Set common features + db.set_common(commondb); + + % retrieve features; + features = get_features(db, clips); + + end + + function remove_features(db, clip) + % weakly deletes clip features from db + + clear_pos(clip); + end + + function delete(db) + % --- + % probably not anything to do here, as we want to + % keep the db! 
+ % see static method destroy + % --- + end + + function reset(db) + % --- + % deletes all the cached data and destroys the + % global feature database + % --- + db.commondb = []; + db.featuredb = []; + db.featuredb_hash = []; + end + + function out = size(db) + % returns the number of features saved in this db + + out = sum(db.featuredb_hash > 0); + end + + function memory(db) + % returns size of whole db in bytes + + % --- + % TODO: Make this work + % --- + + fprintf(' \n This db contains feature sets for %d clips\n ',numel(db.featuredb_hash)) + % get local copies of data + featuredb = db.featuredb; + featuredb_hash = db.featuredb_hash; + commondb = db.commondb; + + whos('featuredb', 'featuredb_hash', 'commondb') + end + end + + % --- + % private functions + % --- + methods (Hidden) + + % --- + % Hash functions + % --- + function out = get_clip_pos(db, clip) + % should become database hashing function + + out = find(db.featuredb_hash == clip.id); + if isempty(out) + out = 0; + end + end + + function out = get_next_pos(db) + % return index for the new clip features + + out = numel(db.featuredb_hash) + 1; + end + + function last_pos = get_last_pos(db) + % return index the last valid db entry + + last_pos = find(db.featuredb_hash > 0, 1, 'last'); + end + + + function set_pos(db, pos, clip) + % set index for the new clip features + + db.featuredb_hash(pos) = clip.id; + end + + function clear_pos(db, clip) + % remove index of the clip features + + db.featuredb_hash(get_clip_pos(db, clip)) = 0; + end + end + + methods (Static) + + % --- + % this resets all feature dbs except the one exluded in the + % 'exclude', {''} cell + % --- + function reset_feature_dbs(varargin) + + [exclude] = process_options(varargin,'exclude',{}); + % --- + % resets all feature dbs except raw features + % --- + vars = whos ('*','global','-regexp', '^db_*'); + + % --- + % check if each is class of DBgen. + % if not in exclude variable, reset + % --- + for i = 1:numel(vars) + + % import global variable + eval(sprintf('global %s',vars(i).name)); + + if strcmp(eval(sprintf('class(%s)',vars(i).name)), 'MTTAudioFeatureDBgen') ... + && isempty(strcellfind(exclude, vars(i).name)) + + eval(sprintf('%s.reset',vars(i).name)); + end + end + end + + function featuredb_type = import_type(matfile) + % function featuredb_type = import_type(matfile) + % + % returns the type of the saved feature db. 
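+ %
+ % Usage sketch (the mat file name is a made-up example):
+ %
+ %   type = MTTAudioFeatureDBgen.import_type('features_basicsm.mat');
+ %   db   = MTTAudioFeatureDBgen(type, 'features_basicsm.mat');
+ %
+ % i.e. the type string stored with the saved db selects the class of
+ % the MTTAudioFeatureDBgen that is then constructed to import it.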
+ + load(matfile, 'featuredb_type', '-MAT'); + end + + function db_nameo = db_name(type) + % returns the standard global var name for a db of given type + + switch type + case 'MTTAudioFeatureRAW' + db_nameo = 'db_magnaaudiofeat'; + + case 'MTTAudioFeatureBasicSm' + db_nameo = 'db_magnaaudiofeat_basicsm'; + + case 'MTTTagFeatureGenreBasic' + db_nameo = 'db_magnatagfeat_genrebasic'; + + case 'MTTMixedFeatureGenreBasicSm' + db_nameo = 'db_magnamixedfeat_genrebasicsm'; + + otherwise + db_nameo = sprintf('db_%s', type); + end + end + + end +end + + + + diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/MTTAudioFeatureRAW.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/MTTAudioFeatureRAW.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,223 @@ +% -- +% This class loads and hanles the aufdio features included with the MTT +% Library +% --- + + +classdef MTTAudioFeatureRAW < MTTAudioFeature & handle + + + properties(Constant = true) + + my_revision = str2double(substr('$Rev$', 5, -1)); + end + + properties + % --- + % Set default parameters + % --- + my_params = struct([]); + + end + % --- + % member functions + % --- + methods + + % --- + % constructor: pointer to feature in database + % --- + function feature = MTTAudioFeatureRAW(varargin) + + feature = feature@MTTAudioFeature(varargin{:}); + + end + + % --- + % load feature data from xml file + % --- + function data = extract(feature, clip) + % load feature data by parsing xml + + global globalvars; + + % fprintf('parsing features for clip %d \n',clip.id()); + + % parse feature + data = xml_parse_mtt(clip.xmlfile_full()); + + % save info data + data.info.type = 'MTTAudioFeatureRAW'; + data.info.owner_id = clip.id; + data.info.creatorrev = feature.my_revision; + + data.info.params = feature.my_params; + end + + function [a1, a2, a3] = visualise(feature) + % --- + % plots the different data types collected in this feature + % --- + for i = 1:numel(feature) + clip = MTTClip(feature(i).owner_id()); + + % --- + % labels for chroma data + % --- + chroma_labels = {'c', 'c#', 'd','d#', 'e', 'f','f#', 'g','g#', 'a','a#', 'h'}; + mode_labels = {'minor', 'major'}; + + % hange labels to reflect detected mode + chroma_labels{feature(i).data.key + 1} = ... + sprintf('(%s) %s',mode_labels{feature(i).data.mode + 1}, chroma_labels{feature(i).data.key + 1}); + + % --- + % calculate data for beats and sections + % --- + [bars, beats, tatums] = feature(i).infer_bar_times(); + + secs = [feature(i).data.sections(:).start; feature(i).data.sections(:).duration]; + + h = figure; + % number of subplots + n = 3; + + % --- + % chroma feature display + % --- + subplot(n,1,1); + + % get segment times and fix for same lengths for all plots + % --- + % NOTE: Last segment will appear longer + % --- + segments = [feature(i).data.segments(:).start]; + segments(end) = feature(i).data.duration; + + % display chroma vectors + uimagesc(segments, 0:11, [feature(i).data.segments(:).pitches]); + + set(gca,'YTick',[0:11], 'YTickLabel', chroma_labels); + + axis xy + colormap(hot) + %colorbar; + ylabel('chroma class'); + title(sprintf('clip %d: %s by %s, chromagram', ... 
+ clip.id, clip.title(),clip.artist())); + + % added sections + axis([1 feature(i).data.duration -1 11.5]); + hl = line([secs(1,:); sum(secs,1)],ones(2, size(secs,2)) * -0.8); + set(hl,'LineWidth',6); + + a1 = gca; + + % --- + % mfcc feature display + % + % NOTE: the first position of timbre is reduced in energy, + % as this seems to introduce some corruption in lots of data + % --- + timbre = feature(i).data.segments(1).timbre; + timbre = timbre/ max(max(abs(timbre))) * ... + mean( mean( abs( cat( 2, ... + feature(i).data.segments(2:min(end,5)).timbre ... + )))); + + subplot(n,1,2); + uimagesc(segments, 0:11, [timbre feature(i).data.segments(2:end).timbre]); + + axis xy + %colorbar; + xlabel('time[s]'); + ylabel('mfcc coeff'); + title(sprintf('mfcc timbre features')); + + a2 = gca; + + % --- + % beats and sections + % --- + subplot(n,1,3); + axis([1 feature(i).data.duration -0.6 1.2]); + + hl = line([1 feature(i).data.duration],[0 0]); + set(hl, 'Color','g'); + + a3 = gca; + + % tatums + hl = line([tatums(1,:); tatums(1,:)],[ones(1, size(tatums,2)) * -0.2; max(-0.1,tatums(2,:))]); + set(hl,'LineWidth',1); + set(hl, 'Color','k'); + + % beats + hl = line([beats(1,:); beats(1,:)],[ones(1, size(beats,2)) * -0.4; max(-0.1,beats(2,:))]); + set(hl,'LineWidth',2); + set(hl, 'Color','b'); + + % bars + hl = line([bars(1,:); bars(1,:)],[ones(1, size(bars,2)) * -0.5; max(-0.1,bars(2,:))]); + set(hl,'LineWidth',4); + set(hl, 'Color','r'); + + % sections + hl = line([secs(1,:); sum(secs,1)],ones(2, size(secs,2)) * -0.5); + set(hl,'LineWidth',6); + + ylabel('confidence'); + title(sprintf('rhythmic features @%3.1f BPM, %d/4 meter',... + feature(i).data.tempo, feature(i).data.timeSignature)); + + end + end + + function [bars, beats, tatums] = infer_bar_times(feature) + % [bars, beats, tatums] = infer_bar_times(feature) + % + % extract bar and beat starting times from tatums + bar = feature.data.bars; + + beats = []; + bars = []; + tatums = []; + + % bars + for i = 1:numel(bar) + % beats + for j = 1:numel(bar(i).beat) + % tatums + for k = 1:numel(bar(i).beat(j).tatum) + tatum = bar(i).beat(j).tatum(k); + + % collect tatums and confidence + tatums(1:2,end+1) = [tatum.time,tatum.confidence]; + + + % --- + % save beat if this is the first tatum in it + % --- + if k == 1 + beats(1:2,end+1) = [tatum.time,... + bar(i).beat(j).confidence]; + + % --- + % save bar if this is the first tatum in it + % --- + if j == 1 + bars(1:2,end+1) = [tatum.time,... + bar(i).confidence]; + end + end + + + end + end + end + + % end function + end + end + +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/MTTAudioFeatureSlaney08.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/MTTAudioFeatureSlaney08.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,362 @@ +classdef MTTAudioFeatureSlaney08 < MTTAudioFeature & handle + % --- + % This Class contains + % a basic summary of MTT features complementary to those in + % MTTAudioFeatureBasicSm, features are extracted + % as described in Slaney 08 - LEARNING A METRIC FOR MUSIC SIMILARITY + % + % The usual workflow for these features constist of three steps + % 1. extract: extracts the basic single-file dependent features + % 2. define_global_transform: calculates the global feature + % transformation parameters + % 3. 
finalise: applies the common transformations to a specific feature + % --- + + properties(Constant = true) + + % svn hook + my_revision = str2double(substr('$Rev$', 5, -1)); + end + + properties + % --- + % Set default parameters + % --- + my_params = struct(... + 'norm_mttstats', 1, ... % + 'whiten_mttstats', 0, ... % NOTE: whitening as in slaney?? + 'select_mttstats', 1 ...% TODO: way to select certain features + ); + end + + % --- + % member functions + % --- + methods + + % --- + % constructor: pointer to feature in database + % --- + function feature = MTTAudioFeatureSlaney08(varargin) + + feature = feature@MTTAudioFeature(varargin{:}); + + end + % --- + % extract feature data from raw audio features + % --- + function data = extract(feature, clip) + % --- + % get features. this includes possible + % local normalisations + % --- + + global globalvars; + + % --- + % TODO: implement time_weighted version of the statistical + % evaluations below + % --- + rawf = clip.audio_features_raw(); + +% segmentDurationMean: mean segment duration (sec.). + data.mttstats.segmentDurationMean = mean([rawf.data.segments.duration]); + +% segmentDurationVariance: variance of the segment duration + data.mttstats.segmentDurationVariance = var([rawf.data.segments.duration]); + +% timeLoudnessMaxMean: mean time to the segment maximum, or attack duration (sec.). + data.mttstats.timeLoudnessMaxMean = mean([rawf.data.segments.loudness_max_time]); + +% loudnessMaxMean: mean of segments’ maximum loudness (dB). + data.mttstats.loudnessMaxMean = mean([rawf.data.segments.loudness_max]); + +% loudnessMaxVariance: variance of the segments’ maximum loudness (dB). + data.mttstats.loudnessMaxVariance = var([rawf.data.segments.loudness_max]); + +% loudnessBeginMean: average loudness at the start of segments (dB) + data.mttstats.loudnessBeginMean = mean([rawf.data.segments.loudness]); + +% loudnessBeginVariance: variance of the loudness at the start of segments (dB^2). Correlated with loudnessMaxVariance + data.mttstats.loudnessBeginVariance = var([rawf.data.segments.loudness]); + +% loudnessDynamicsMean: average of overall dynamic range in the segments (dB). +% loudnessDynamicsVariance: segment dynamic range variance +% (dB). Higher variances suggest more dynamics in each segment. + % --- + % NOTE: the above information cannot be extracted from the MTT + % Features, maybe more recent echonest features allow for this + % --- + +% loudness: overall loudness estimate of the track (dB). + data.mttstats.loudness = rawf.data.loudness; + + % --- + % TODO: get these from the beat loudnesses? + % --- + +% tempo: overall track tempo estimate (in beats per minute, BPM). Doubling and halving errors are possible. + data.mttstats.tempo = rawf.data.tempo; + +% tempoConfidence: a measure of the confidence of the tempo estimate (between 0 and 1). + data.mttstats.tempoConfidence = rawf.data.tempoConfidence; + + [~, beats, tatums] = rawf.infer_bar_times(); + +% beatVariance: a measure of the regularity of the beat (secs). + if numel(beats) > 0 + bdiff = diff(beats(1,:)); + data.mttstats.beatVariance = var(bdiff); + else + + % --- + % This is a fake replacement variance + % --- + data.mttstats.beatVariance = 0; + end + + +% tatum: estimated overall tatum duration (in seconds). Tatums are subdivisions of the beat.
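+ % ---
+ % Worked example (hypothetical timings, added for clarity): with beat
+ % onsets every 0.5 s and tatum onsets every 0.25 s,
+ %
+ %   tatum            = median(diff(tatums(1,:)));         % -> 0.25
+ %   numTatumsPerBeat = median(diff(beats(1,:))) / tatum;  % -> 2
+ %
+ % which is what the branch below computes from the inferred markers.
+ % ---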
+ % --- + % note: the tatum length could be also + % accessed by comparison with the global bpm estimate + % --- + if numel(tatums) > 0 + tdiff = diff(tatums(1,:)); + data.mttstats.tatum = median(tdiff); + + % tatumConfidence: a measure of the con?dence of the tatum estimate (beween 0 and 1). + data.mttstats.tatumConfidence = mean(tatums(2,:)); + + % numTatumsPerBeat: number of tatums per beat + data.mttstats.numTatumsPerBeat = median(bdiff) / data.mttstats.tatum; + else + % --- + % This is a facke replacement tatum + % TODO: maybe set confidence to -1? + % --- + + data.mttstats.tatum = 0; + + % tatumConfidence: a measure of the con?dence of the tatum estimate (beween 0 and 1). + + data.mttstats.tatumConfidence = 0; + + % numTatumsPerBeat: number of tatums per beat + data.mttstats.numTatumsPerBeat = 2; + end + + + % --- + % TODO: beat analysis + % --- + +% timeSignature: estimated time signature (number of beats per measure). (0-7 / 7) + data.mttstats.timeSignature = rawf.data.timeSignature; + +% timeSignatureStability: a rough estimate of the stability of the time signature throughout the track + data.mttstats.timeSignatureStability = rawf.data.timeSignatureConfidence; + + % --- + % prepare field for final features + % --- + data.final.vector = []; + data.final.vector_info = struct(); + data.final.dim = 0; + + % save info data + data.info.type = 'MTTAudioFeatureSlaney08'; + data.info.owner_id = clip.id; + data.info.creatorrev = feature.my_revision; + + % save parameters + data.info.params = feature.my_params; + end + + function define_global_transform(features) + % calculate and set normalization factors from the group of + % input features. These features will be set for the full database + + for i = 1:numel(features) + data = features(i).data.mttstats; + + final(:,i) = [data.segmentDurationMean; ... + data.segmentDurationVariance; ... + data.timeLoudnessMaxMean; ... + data.loudnessMaxMean; ... + data.loudnessMaxVariance; ... + data.loudnessBeginMean; ... + data.loudnessBeginVariance; ... + data.loudness; ... + data.tempo; ... + data.tempoConfidence; ... + data.beatVariance; ... + data.tatum; ... + data.tatumConfidence; ... + data.numTatumsPerBeat; ... + data.timeSignature; ... + data.timeSignatureStability]; + end + + if features(1).my_params.norm_mttstats + if numel(features) == 1 + error ('Insert feature array for this method, or set normalisation to 0'); + end + + % --- + % here, we only need to define the post-normalisation + % --- + [final, pstd] = mapminmax(final,0,1); + common.mttstats.pre_norm = pstd; + + % --- + % NOTE: whitening as in slaney?? + % Would make reading the + % mahal matrices really hard + % --- + + features(1).my_db.set_common(common); + + else + + features(1).my_db.set_common([1]); + end + + % save the normalised features straight away! + features.finalise(final); + end + + + function finalise(features, final) + % applies a final transformation and + % collects the information of this feature within a single vector + % see info for types in specific dimensions + % check if features have been finalised already + + % --- + % check for dummy feature + % --- + if isfield(features(1).my_params,'select_mttstats') && ... + isnumeric(features(1).my_params.select_mttstats) && ... 
+ features(1).my_params.select_mttstats == 0 + + % if no information needed just fill everything 0 + for i = 1:numel(features) + features(i).data.final.vector = []; + features(i).data.final.dim = 0; + + % fill up info struct and append to feature + features(i).data.final.vector_info.labels = {}; + end + + return; + end + + % --- + % set feature labelling + % --- + info = {'segmentDurationMean', ... + 'segmentDurationVariance', ... + 'timeLoudnessMaxMean', ... + 'loudnessMaxMean', ... + 'loudnessMaxVariance', ... + 'loudnessBeginMean', ... + 'loudnessBeginVariance', ... + 'loudness', ... + 'tempo', ... + 'tempoConfidence', ... + 'beatVariance', ... + 'tatum', ... + 'tatumConfidence', ... + 'numTatumsPerBeat', ... + 'timeSignature', ... + 'timeSignatureStability'}; + + % --- + % construct resulting feature vector out of features + % --- + if nargin == 2 && isempty(final) + + % the final vector etc already are set to zero; + return; + + elseif nargin == 2 && (numel(features) == size(final, 2)) + for i = 1:numel(features) + + % check for neccesary parameters + if isempty(features(i).my_db.commondb) + + error('Define the global transformation first') + return; + end + + features(i).data.final.vector = final(:,i); + features(i).data.final.dim = size(final,1); + + % fill up info struct and append to feature + features(i).data.final.vector_info.labels = info; + end + else + % --- + % if features have been added after gettin gnormalisation + % parameters, ther should be still an option to include + % them + % --- + + for i = 1:numel(features) + + % check for neccesary parameters + if isempty(features(i).my_db.commondb) + + error('Define the global transformation first') + return; + end + + data = features(i).data.mttstats; + final = [data.segmentDurationMean; ... + data.segmentDurationVariance; ... + data.timeLoudnessMaxMean; ... + data.loudnessMaxMean; ... + data.loudnessMaxVariance; ... + data.loudnessBeginMean; ... + data.loudnessBeginVariance; ... + data.loudness; ... + data.tempo; ... + data.tempoConfidence; ... + data.beatVariance; ... + data.tatum; ... + data.tatumConfidence; ... + data.numTatumsPerBeat; ... + data.timeSignature; ... + data.timeSignatureStability]; + + if features(1).my_params.norm_mttstats == 1 + + [final] = mapminmax('apply', final, features(1).common.mttstats.pre_norm); + end + + features(i).data.final.vector = final; + features(i).data.final.dim = size(final,1); + + % fill up info struct and append to feature + features(i).data.final.vector_info.labels = info; + end + + end + + % --- + % TODO: Maybe delete more basic features again at this point? + % --- + end + + % --- + % destructor: do we really want to remove this + % from the database? No, but + % TODO: create marker for unused objects in db, and a cleanup + % function + % --- + function delete(feature) + + end + end +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/MTTClip.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/MTTClip.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,336 @@ +% --- +% This is the class for magnatagatune clips +% --- + +classdef MTTClip < handle + + % --- + % here come the internal clip properties. + % the database is stored as a global variable + % --- + properties (SetAccess = private) + + % magnatagatune clip id + my_id; + end + + % do not save whole db into mat file + properties (Hidden, Transient) + + my_db; + end + + properties (Hidden) + + my_dbpos; + end + + % --- + % here come the clip functions. 
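+ %
+ % Usage sketch (the clip id 123 is a made-up example):
+ %
+ %   clip = MTTClip(123);              % looks the id up in db_magnaclips
+ %   clip.print();                     % title, artist, genres and tags
+ %   rawf = clip.audio_features_raw(); % echonest-style raw features
+ %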
+ % --- + methods + + % --- + % simple constructor + % --- + function clip = MTTClip(id) + + % usual or empty constructor ? + if nargin > 0 + + % --- + % check for magnatagatunedatabase + % and load it if not present + % --- + global db_magnaclips; + + if ~isempty(db_magnaclips)% exist('clip_info_proper') + clip.my_db = db_magnaclips; + else + + error 'db not found'; + % dbload; + end + + % --- + % recursive call for creating multiple clips + % --- + if numel(id) > 1 + + % multi-id case + clip = MTTClip(); + for i = 1:numel(id) + clip(i) = MTTClip(id(i)); + end + + else + % --- + % actual constructor + % --- + clip.my_id = id; + clip.my_dbpos = clip.dbpos(); + + if isempty(clip.my_dbpos) + error ('Clip %d not found in DB', full(id)) + end + end + else + + % --- + % TODO: deal with empty constructor + % --- + end + end + + % --- + % member functions + % --- + + % returns the id (function neccessary for + % multi-clip environments) + function out = id(this) + + out = [this.my_id]; + end + + function out = comparison_id(this) + + out = [this(1).my_db.comparison_ids(this)]; + end + + function out = title(this) + % returns name strings for given genre position ids + + out = this.my_db.clip_info_proper{this.my_dbpos,3}; + end + + function out = album(this) + % returns name strings for given genre position ids + + out = this.my_db.clip_info_proper{this.my_dbpos,5}; + end + + function out = artist(this) + % returns name strings for given genre position ids + + out = this.my_db.artistdb.annots(this.id); + out = out{1}; + end + + function out = artist_id(this) + % returns name strings for given genre position ids + + out = this.my_db.artistdb.annotids_for_owner(this.id); + end + + function my_tag_ids = tag_ids(this) + % returns clip tag posids for given clip id + + my_tag_ids = this.my_db.tagdb.annotids_for_owner(this.id); + end + + function out = tags(this) + % returns name strings for given genre position ids + + out = this.my_db.tagdb.annots(this.id); + end + + % caution. last.fm dbs take a clip + function [out, score, annotids] = fmtags(this) + + [out, score, annotids] = this.my_db.fmtagdb.annots(this); + end + + % caution. last.fm dbs take a clip + function [out, score] = fmtag_ids(this) + + [out, score] = this.my_db.fmtagdb.annotids_for_owner(this); + end + + function [out, score] = genre_ids(this) + % returns clip genre for given clip id + + [out, score] = this(1).my_db.genredb.annotids_for_owner([this.id]); + end + + function [out, score, annotids] = genres(this) + % returns name strings for given genre position ids + + [out, score, annotids] = this(1).my_db.genredb.annots([this.id]); + end + + + function print(this) + % prints out the album info + + fprintf('clip %d: %s by %s,\n on %s \n',this.id, ... 
+ this.title(), this.artist(), this.album()); + + v = strcat(this.genres(),', '); + fprintf(' genres: %s\n', strcat(v{:})); + + v = strcat(this.tags(),', '); + fprintf(' tags: %s\n', strcat(v{:})); + + v = strcat(this.fmtags(),', '); + fprintf(' last.fm tags: %s\n', strcat(v{:})); + + % if nargout == 0 + % fprintf(out) + % end + end + + function out = mp3file(this) + % returns filename for given genre id + + out = this.my_db.clip_info_proper{this.my_dbpos,10}; + end + + function filename = mp3file_full(this) + % returns mp3 filename for given clip id + + global globalvars; + filename = strcat(globalvars.tstaudiopath,this.mp3file()); + filename = strrep(filename,'\',globalvars.systemslash); + filename = strrep(filename,'/',globalvars.systemslash); + end + + function filename = xmlfile_full(this) + % returns xml filename for given clip id + + % --- + % NOTE: the xml files are supposed to be in the folder + % "xml" as subfolder of the mp3 file path + % --- + global globalvars; + filename = strcat(globalvars.tstaudiopath,'xml\',... + this.mp3file,'.xml'); + + filename = strrep(filename,'/',globalvars.systemslash); + filename = strrep(filename,'\',globalvars.systemslash); + end + + function len = play(clips, plen) + % len = play(clips) + % + % plays magnatune clip given by clip id, and + % returns full playback length + + if nargin <2 + plen = 0; %seconds play + end + + len = 0; + for i = 1:numel(clips) + + % get sample rate + [null,sr] = mp3read(clips(i).mp3file_full(),0); + + if plen > 0 + + % read mp3 file + [src,sr,NBITS,OPTS] = mp3read(clips(i).mp3file_full(), plen*sr); + else + % read full mp3 file + [src,sr,NBITS,OPTS] = mp3read(clips(i).mp3file_full()); + end + + % --- + % NOTE: sound() seems to pause the system when trying to + % play a clip while still playing another one + % --- + sound(src,sr); + + fprintf('\n--- now playing ---\n'); + clips(i).print(); + + % add clip lengths + len = len + length(src) / sr; + end + end + + function skip(clips) + % skips through given clips + + clips.play(5); + end + + function out = dbpos(this) + % returns matrix position for given clip id + + out = find(this.my_db.annots_ids == this.id, 1 ,'first'); + end + + % --- + % Generic Features + % --- + function feature = features(clip, type, varargin) + % feature = features(clip, type, varargin) + % + % returns the features of type "type" for given clips and + % parameters + + db_name = MTTAudioFeatureDBgen.db_name(type); + + % global database + eval( sprintf( 'global %s;', db_name)); + + % create database if neccesary + if eval(sprintf('isempty(%s);', db_name)); + + eval(sprintf('%s = MTTAudioFeatureDBgen(''%s'');', db_name, type)); + end + + % retrieve features from db + feature = eval(sprintf('%s.get_features(clip, varargin{:});', db_name)); + end + + % --- + % Audio Features Section + % --- + function features = audio_features_raw(clip) + % get the features from the global database + + features = clip.features('MTTAudioFeatureRAW'); + end + + function features = audio_features_basicsm(clip, varargin) + % get the features from the global database + + features = clip.features('MTTAudioFeatureBasicSm', varargin{:}); + end + + function features = genre_features_basic(clip, varargin) + % get the features from the global database + + features = clip.features('MTTTagFeatureGenreBasic', varargin{:}); + + end + + function features = mixed_features_genrebasicsm(clip, varargin) + % get the features from the global database + + features = clip.features('MTTMixedFeatureGenreBasicSm', varargin{:}); + end + + 
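+ % ---
+ % Usage sketch for the generic features() dispatcher above (the
+ % parameter pair is a made-up example; it is forwarded to the
+ % feature constructor via the global feature db):
+ %
+ %   f = clip.features('MTTAudioFeatureBasicSm', 'nchromas', 4);
+ %   v = f.vector();  % finalised vector, available once
+ %                    % define_global_transform has been run on a set
+ % ---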
function features = mixed_features_genrebasicsm_pca(clip, varargin) + % get the features from the global database + + features = clip.features('MTTMixedFeatureGenreBasicSmPCA', varargin{:}); + end + + function features = random_features(clip, varargin) + % get the features from the global database + + features = clip.features('MTTRandomFeature', varargin{:}); + end + + end + + % --- + % static methods + % --- + methods(Static = true) + + end +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/MTTClipDB.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/MTTClipDB.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,140 @@ +% --- +% This is the wrapper and loader class for magnatagatune db +% and related data +% --- + +classdef MTTClipDB < handle + + properties (SetAccess = 'private') + + % --- + % TODO: create global "db_magnaclips" class pointer + % --- + + annots; + annots_filenames; + annots_ids; + annots_names; + + % inverse ids for comparison + comparison_ids; + + clip_info_proper; + clip_info_proper_names; + +% magnagenre_childof; + + % genre database + genredb; + + % artist database + artistdb; + + % tag databses + tagdb; + + fmtagdb; + + + end + + properties (Hidden) + % hidden properties + + end + + methods + + % --- + % constructore + % --- + function db = MTTClipDB() + + % --- + % TODO: restructure data / db.mat + % --- + + load 'db.mat'; + + db.annots = annots; + db.annots_ids = annots_ids; + db.annots_names = annots_names; + db.annots_filenames = annots_filenames; + + % comparison ids + db.comparison_ids = sparse(comparison_ids,1, 1:numel(comparison_ids)); + + db.clip_info_proper = clip_info_proper; + db.clip_info_proper_names = clip_info_proper_names; + + % Genre in new structure for textual annotations + db.genredb = AnnotDB(magnagenres, clip_magnagenres, annots_ids); + + % Artist for each clip + db.artistdb = AnnotDB('clips_by_annot', ... 
+ db.clip_info_proper(:, 4), annots_ids); + + % Magnatagatune Tags + db.tagdb = AnnotDB(annots_names, annots, annots_ids); + + % Last Fm Tags + db.fmtagdb = LFMTagsDB(db.artistdb); + + + +% db.clip_magnagenres = clip_magnagenres; +% db.magnagenres = magnagenres; +% db.magnagenre_childof = magnagenre_childof; + + end + + % --- + % member functions + % --- + function out = genres(db) + % returns the magnatune genre list + + out = db.genredb.lexicon; + end + + function clips = clips_by_genre_name(db, name) + % returns all clips having the assigned genre + + clips = MTTClip( db.genredb.owner( name)); + end + + function out = tags(db) + % returns the magnatune artist list + + out = db.tagdb.lexicon; + end + + + function out = artists(db) + % returns the magnatune artist list + + out = db.artistdb.lexicon; + end + + function clips = clips_by_artist_name(db, name) + % returns all clips having the assigned artist + + clips = MTTClip( db.artistdb.owner( name)); + end + end + + % --- + % Hidden Methods + % --- + methods (Hidden = true, Access = private) + + function out = clips_by_genre(db, genre_id) + % returns clip ids given a genre id + + pos = (db.clip_magnagenres(:,genre_id) == 1); + + % return clip ids, not pos + out = db.annots_ids(pos); + end + end +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/MTTMixedFeatureGenreBasicSm.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/MTTMixedFeatureGenreBasicSm.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,175 @@ +classdef MTTMixedFeatureGenreBasicSm < MTTAudioFeature & handle + % --- + % + % The usual worklow for these features constist of three steps + % 1. extract: extracts the basic single-file dependent features + % 2. define_global_transform: calculates the global feature + % transformation parameters + % 3. finalise: applies the common transformations to a specific feature + % --- + + properties(Constant = true) + + % svn hook + my_revision = str2double(substr('$Rev$', 5, -1)); + end + + properties + % --- + % Set default parameters + % --- + my_params = MTTAudioFeature.inherited_params(... + 'MTTAudioFeatureBasicSm', ... + ... % --- + ... % following are GenreBasic parameters + ... % --- + 'pct_genres', 1 ... % 1/100 percentile genre tags used + ); + end + + % --- + % member functions + % --- + methods + + % --- + % constructor: pointer to feature in database + % --- + function feature = MTTMixedFeatureGenreBasicSm(varargin) + + feature = feature@MTTAudioFeature(varargin{:}); + end + + % --- + % extract feature data by combining genre adn basicsm features + % --- + function data = extract(feature, clip) + + % --- + % get Basic Summary audio features. this includes possible + % local normalisations + % --- + basicsm = clip.audio_features_basicsm(feature.my_params); + + % --- + % get genre tag features + % --- + + genrebasic = clip.genre_features_basic(feature.my_params); + + % save to features data field + data.audio = basicsm; + data.tags = genrebasic; + + % prepare field for final features + data.final.vector = []; + data.final.vector_info = struct(); + data.final.dim = 0; + + % save info data + data.info.type = 'MTTMixedFeatureGenreBasicSm'; + data.info.owner_id = clip.id; + data.info.creatorrev = feature.my_revision; + + % save parameters + data.info.params = feature.my_params; + end + + function define_global_transform(features) + % calculate and set normalization factors from the group of + % input features. 
These features will be set for the full database + + if numel(features) == 1 + error ('Insert feature array for this method'); + end + + % --- + % We collect all the relevant basicsm + % features and get the transform on this basis. + % --- + for i = 1:numel(features) + basicsm(i) = features(i).data.audio; + end + + % call the features own transsform function + basicsm.define_global_transform(); + + % --- + % We collect all the relevant genretag + % features and get the transform on this basis. + % --- + for i = 1:numel(features) + genrebasic(i) = features(i).data.tags; + end + + % call the features own transsform function + genrebasic.define_global_transform(); + + % --- + % set common feature values for mixed features + % --- + features(1).my_db.set_common([1]); %trivial common + end + + + function finalise(feature) + % applies a final transformation and collects the + % information of this feature within a single vector + % see info for types in specific dimensions + + for i = 1:numel(feature) + + % check for neccesary parameters + if isempty(feature(i).my_db.commondb) + + error('Define the global transformation first'); + end + + % --- + % finalise audio feature and get vector + % --- + basicsm = feature(i).data.audio; + basicsm.finalise(); + + % finalise tag features + genrebasic = feature(i).data.tags; + genrebasic.finalise; + + % --- + % final data assembly + % --- + + % concatenate vectors + feature(i).data.final.vector = ... + [basicsm.vector() ; genrebasic.vector()]; + + % add feature dimensions + feature(i).data.final.dim = basicsm.dim + genrebasic.dim; + + % concatenate labels + feature(i).data.final.vector_info.labels = ... + {basicsm.data.final.vector_info.labels{:}, ... + genrebasic.data.final.vector_info.labels{:}}; + end + end + + % --- + % destructor: do we really want to remove this + % from the database? No, but + % TODO: create marker for unused objects in db, and a cleanup + % function + % --- + function delete(feature) + + end + + function visualise(feature) + % --- + % plots the different data types collected in this feature + % --- + for i = 1:numel(feature) + clip = MTTClip(feature(i).owner_id()); + end + end + end +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/MTTMixedFeatureGenreBasicSmPCA.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/MTTMixedFeatureGenreBasicSmPCA.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,265 @@ +% -- +% This class loads and hanles the aufdio features included with the MTT +% Library +% --- +classdef MTTMixedFeatureGenreBasicSmPCA < MTTAudioFeature & handle + + properties(Constant = true) + + my_revision = str2double(substr('$Rev$', 5, -1)); + end + + properties + + % --- + % Set default parameters + % --- + my_basetype = 'MTTMixedFeatureGenreBasicSm'; + + my_params = MTTAudioFeature.inherited_params(... + 'MTTMixedFeatureGenreBasicSm', ... + 'min_pca_var', 0, ... % % fraction of variance to keep + 'max_pca_coeffs', 0, ...% max. number of final coefficients + 'norm_pre_pca', 1, ...% normalise pca coefficients after transformation + 'norm_post_pca', 1 ... 
+ ); + end + % --- + % member functions + % --- + methods + + % --- + % constructor: pointer to feature in database + % --- + function feature = MTTMixedFeatureGenreBasicSmPCA(varargin) + + feature = feature@MTTAudioFeature(varargin{:}); + end + + % --- + % load feature data from xml file + % --- + function data = extract(feature, clip) + % load feature data by parsing xml + + global globalvars; + + % --- + % we extract the base features, and save + % the pointers to these. + % the main work is then done in the define_global_transf + % and finalise functions. + % --- + data.basefeat = clip.features(feature.my_basetype,... + feature.my_params); + + % save info data + data.info.type = 'MTTMixedFeatureGenreBasicSmPCA'; + data.info.owner_id = clip.id; + data.info.creatorrev = feature.my_revision; + + % save param data + data.info.params = feature.my_params; + + % prepare field for final features + data.final.vector = []; + data.final.dim = 0; + data.final.vector_info.labels = {}; + end + + function define_global_transform(features) + + if numel(features) == 1 + error ('Insert feature array for this method'); + end + + % --- + % We collect all the relevant genretag + % features and get the transform on this basis. + % --- + for i = 1:numel(features) + basef(i) = features(i).data.basefeat; + end + + % call the features own transsform function + basef.define_global_transform(); + + % --- + % finalise the basic features, and + % get the feature vectors; + % --- + basef.finalise(); + X = basef.vector(); + + % check dataset dimension + if numel(features) < basef.dim; + + error ('Not enough feature vectors for PCA calculation. need %d samples', ... + basef.dim); + end + + % --- + % NOTE: should the data be normalised and scaled to -1:1 + % instead of being in a range of 0-1 AND max-min = 1 + % --- + if features(1).my_params.norm_pre_pca == 1 + + [X, pstd] = mapminmax(X,-1,1); + common.pca.pre_norm = pstd; + elseif features(1).my_params.norm_pre_pca == 2 + + [X, pstd] = mapstd(X,0,1); + common.pca.pre_norm = pstd; + end + + + % --- + % get and apply the principal component analysis + % NOTE: the variance percentile is applied here, too. + % --- + + [Y, ps] = processpca(X, 0); + common.pca.transform = ps; + + % --- + % get cumulative sum of variance, and decide on cutoff + % point + % --- + v = cumsum(var(Y')); + v = v / max(v); + common.pca.transform.varpart = v; + + if features(1).my_params.min_pca_var > 0 + + min_pca_idx = find(v >= features(1).my_params.min_pca_var,1, 'first'); + + % save into pca structure + common.pca.transform.yrows = min_pca_idx; + + end + + % normalise pca values after processing + if features(1).my_params.norm_post_pca + + [Y,pmm] = mapminmax(Y,0,1); + common.pca.post_norm = pmm; + + end + + % --- + % set common feature values for mixed features + % --- + features(1).my_db.set_common(common); + + % save the transformed features straight away! + features.finalise(Y); + end + + function finalise(feature, final) + % applies a final transformation and + % collects the information of this feature within a single vector + % see info for types in specific dimensions + + % determine size + if feature(1).my_params.max_pca_coeffs > 0 + + max_size = min(feature(1).common.pca.transform.yrows, ... 
+ feature(1).my_params.max_pca_coeffs); + else + + max_size = feature(1).common.pca.transform.yrows; + end + + + % prepare information + info = {'PCA'}; + if isfield(feature(1).common.pca.transform, 'varpart') + info(2:max_size) = num2cell(feature(1).common.pca.transform.varpart(2:max_size)); + else + info(2:max_size) = num2cell(2:max_size); + end + + + % check if features have been finalised already + if nargin == 2 && isempty(final) + + % the final vector etc already are set to zero; + return; + + elseif nargin == 2 && (numel(feature) == size(final, 2)) + + for i = 1:numel(feature) + + % save final vector and description + feature(i).data.final.vector = final(1:max_size,i); + feature(i).data.final.dim = max_size; + feature(i).data.final.vector_info.labels = info; + end + + else + % features have to be transformed first + % --- + % TODO: this code remains untested + % --- + + % check for neccesary parameters + if isempty(feature(1).my_db.commondb) + + error('Define the global transformation first') + return; + end + + + for i = 1:numel(feature) + + % check for neccesary parameters + if isempty(feature(i).my_db.commondb) + + error('Define the global transformation first') + end + + % --- + % get feature vector and apply transformation + % --- + X = feature(i).data.basefeat.vector(); + + % --- + % apply normalisation used for removing mean + % in training data + % --- + if feature(1).my_params.norm_pre_pca == 1 + + X = mapminmax('apply', X, feature(1).common.pca.pre_norm); + elseif feature(1).my_params.norm_pre_pca == 2 + + X = mapstd('apply', X, feature(1).common.pca.pre_norm); + end + + % apply PCA transform + vec = processpca('apply', X, feature(1).common.pca.transform); + + % normalise pca values after transformation + if feature(1).my_params.norm_post_pca + + vec = mapminmax('apply', vec,... + feature(1).common.pca.post_norm); + end + + % --- + % cut vector to final size. + % NOTE: this should be done before + % transformation to reduce computation time + % --- + vec = vec(1:max_size); + + % save final vector and description + feature(i).data.final.vector = vec; + feature(i).data.final.dim = numel(vec); + feature(i).data.final.vector_info.labels = info; + end + end + end + + end +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/MTTMixedFeatureGenreRandom.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/MTTMixedFeatureGenreRandom.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,148 @@ +classdef MTTMixedFeatureGenreRandom < MTTAudioFeature & handle + % --- + % + % The usual worklow for these features constist of three steps + % 1. extract: extracts the basic single-file dependent features + % 2. define_global_transform: calculates the global feature + % transformation parameters + % 3. finalise: applies the common transformations to a specific feature + % --- + + properties(Constant = true) + + % svn hook + my_revision = str2double(substr('$Rev: 455 $', 5, -1)); + end + + properties + % --- + % Set default parameters + % --- + my_params = MTTAudioFeature.inherited_params(... + 'MTTTagFeatureGenreBasic', ... + ... % --- + ... % following are Random parameters + ... % --- + 'nrandoms', 100 ... 
+ ); + end + + % --- + % member functions + % --- + methods + + % --- + % constructor: pointer to feature in database + % --- + function feature = MTTMixedFeatureGenreRandom(varargin) + + feature = feature@MTTAudioFeature(varargin{:}); + end + + % --- + % extract feature data by combining genre adn basicsm features + % --- + function data = extract(feature, clip) + + % --- + % get MTTMixedFeatureGenreBasicSm this includes possible + % local normalisations + % --- + data.tags = clip.features('MTTTagFeatureGenreBasic',feature.my_params); + + % --- + % get genre tag features + % --- + + data.random = clip.features('MTTRandomFeature',feature.my_params); + + + % prepare field for final features + data.final.vector = []; + data.final.vector_info = struct(); + data.final.dim = 0; + + % save info data + data.info.type = class(feature); + data.info.owner_id = clip.id; + data.info.creatorrev = feature.my_revision; + + % save parameters + data.info.params = feature.my_params; + end + + function define_global_transform(features) + % calculate and set normalization factors from the group of + % input features. These features will be set for the full database + + if numel(features) == 1 + error ('Insert feature array for this method'); + end + + % --- + % We collect all the relevant tag + % features and get the transform on this basis. + % --- + for i = 1:numel(features) + tags(i) = features(i).data.tags; + end + + % call the features own transsform function + tags.define_global_transform(); + + % --- + % Random features have no global transform + % --- + + % --- + % set common feature values for mixed features + % --- + features(1).my_db.set_common([1]); %trivial common + end + + + function finalise(feature) + % applies a final transformation and collects the + % information of this feature within a single vector + % see info for types in specific dimensions + + for i = 1:numel(feature) + + % check for neccesary parameters + if isempty(feature(i).my_db.commondb) + + error('Define the global transformation first'); + end + + % --- + % final data assembly + % --- + % concatenate vectors + feature(i).data.final.vector = ... + [feature(i).data.tags.vector() ;... + feature(i).data.random.vector()]; + + % add up feature dimensions + feature(i).data.final.dim = feature(i).data.tags.dim... + + feature(i).data.random.dim; + + % concatenate labels + lbl1 = feature(i).data.tags.labels(); + lbl2 = feature(i).data.random.labels(); + feature(i).data.final.vector_info.labels = ... + {lbl1{:}, lbl2{:}}; + end + end + + % --- + % destructor: do we really want to remove this + % from the database? No, but + % TODO: create marker for unused objects in db, and a cleanup + % function + % --- + function delete(feature) + + end + end +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/MTTMixedFeatureSlaney08GenreBasicSm.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/MTTMixedFeatureSlaney08GenreBasicSm.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,159 @@ +classdef MTTMixedFeatureSlaney08GenreBasicSm < MTTAudioFeature & handle + % --- + % + % + % The usual worklow for these features constist of three steps + % 1. extract: extracts the basic single-file dependent features + % 2. define_global_transform: calculates the global feature + % transformation parameters + % 3. 
finalise: applies the common transformations to a specific feature + % --- + + properties(Constant = true) + + % svn hook + my_revision = str2double(substr('$Rev$', 5, -1)); + end + + properties + % --- + % Set default parameters + % --- + my_params = MTTAudioFeature.inherited_params(... + 'MTTMixedFeatureGenreBasicSm', ... + ... % --- + ... % following are Slaney08 parameters + ... % --- + 'norm_mttstats', 1, ... % + 'whiten_mttstats', 0, ... % NOTE: whitening as in slaney?? + 'select_mttstats', 1 ...% select certain features + ); + end + + % --- + % member functions + % --- + methods + + % --- + % constructor: pointer to feature in database + % --- + function feature = MTTMixedFeatureSlaney08GenreBasicSm(varargin) + + feature = feature@MTTAudioFeature(varargin{:}); + end + + % --- + % extract feature data by combining genre adn basicsm features + % --- + function data = extract(feature, clip) + + % --- + % get MTTMixedFeatureGenreBasicSm this includes possible + % local normalisations + % --- + data.genrebasicsm = clip.features('MTTMixedFeatureGenreBasicSm',feature.my_params); + + % --- + % get genre tag features + % --- + + data.mttstats = clip.features('MTTAudioFeatureSlaney08',feature.my_params); + + + % prepare field for final features + data.final.vector = []; + data.final.vector_info = struct(); + data.final.dim = 0; + + % save info data + data.info.type = class(feature); + data.info.owner_id = clip.id; + data.info.creatorrev = feature.my_revision; + + % save parameters + data.info.params = feature.my_params; + end + + function define_global_transform(features) + % calculate and set normalization factors from the group of + % input features. These features will be set for the full database + + if numel(features) == 1 + error ('Insert feature array for this method'); + end + + % --- + % We collect all the relevant genrebasicsm + % features and get the transform on this basis. + % --- + for i = 1:numel(features) + genrebasicsm(i) = features(i).data.genrebasicsm; + end + + % call the features own transsform function + genrebasicsm.define_global_transform(); + + % --- + % We collect all the relevant mttstats + % features and get the transform on this basis. + % --- + for i = 1:numel(features) + mttstats(i) = features(i).data.mttstats; + end + + % call the features own transsform function + mttstats.define_global_transform(); + + % --- + % set common feature values for mixed features + % --- + features(1).my_db.set_common([1]); %trivial common + end + + + function finalise(feature) + % applies a final transformation and collects the + % information of this feature within a single vector + % see info for types in specific dimensions + + for i = 1:numel(feature) + + % check for neccesary parameters + if isempty(feature(i).my_db.commondb) + + error('Define the global transformation first'); + end + + % --- + % final data assembly + % --- + % concatenate vectors + feature(i).data.final.vector = ... + [feature(i).data.genrebasicsm.vector() ;... + feature(i).data.mttstats.vector()]; + + % add up feature dimensions + feature(i).data.final.dim = feature(i).data.genrebasicsm.dim... + + feature(i).data.mttstats.dim; + + % concatenate labels + lbl1 = feature(i).data.genrebasicsm.labels(); + lbl2 = feature(i).data.mttstats.labels(); + feature(i).data.final.vector_info.labels = ... + {lbl1{:}, lbl2{:}}; + end + end + + % --- + % destructor: do we really want to remove this + % from the database? 
No, but + % TODO: create marker for unused objects in db, and a cleanup + % function + % --- + function delete(feature) + + end + + end +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/MTTMixedFeatureSlaney08GenreBasicSmPCA.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/MTTMixedFeatureSlaney08GenreBasicSmPCA.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,264 @@ +% -- +% This class loads and hanles the aufdio features included with the MTT +% Library +% --- +classdef MTTMixedFeatureSlaney08GenreBasicSmPCA < MTTAudioFeature & handle + + properties(Constant = true) + + my_revision = str2double(substr('$Rev$', 5, -1)); + end + + properties + + % --- + % Set default parameters + % --- + my_basetype = 'MTTMixedFeatureSlaney08GenreBasicSm'; + + my_params = MTTAudioFeature.inherited_params(... + 'MTTMixedFeatureSlaney08GenreBasicSm', ... + 'min_pca_var', 0, ... % fraction of variance to keep + 'max_pca_coeffs', 0, ...% max. number of final coefficients + 'norm_pre_pca', 1, ...% normalise pca coefficients after transformation + 'norm_post_pca', 1 ... + ); + end + % --- + % member functions + % --- + methods + + % --- + % constructor: pointer to feature in database + % --- + function feature = MTTMixedFeatureSlaney08GenreBasicSmPCA(varargin) + + feature = feature@MTTAudioFeature(varargin{:}); + end + + % --- + % load feature data from xml file + % --- + function data = extract(feature, clip) + % load feature data by parsing xml + + global globalvars; + + % --- + % we extract the base features, and save + % the pointers to these. + % the main work is then done in the define_global_transf + % and finalise functions. + % --- + data.basefeat = clip.features(feature.my_basetype,... + feature.my_params); + + % save info data + data.info.type = class(feature); + data.info.owner_id = clip.id; + data.info.creatorrev = feature.my_revision; + + % save param data + data.info.params = feature.my_params; + + % prepare field for final features + data.final.vector = []; + data.final.dim = 0; + data.final.vector_info.labels = {}; + end + + function define_global_transform(features) + + if numel(features) == 1 + error ('Insert feature array for this method'); + end + + % --- + % We collect all the relevant genretag + % features and get the transform on this basis. + % --- + for i = 1:numel(features) + basef(i) = features(i).data.basefeat; + end + + % call the features own transsform function + basef.define_global_transform(); + + % --- + % finalise the basic features, and + % get the feature vectors; + % --- + X = basef.vector(); + + % check dataset dimension + if numel(features) < basef.dim; + + error ('Not enough feature vectors for PCA calculation. need %d samples', ... + basef.dim); + end + + % --- + % NOTE: should the data be normalised and scaled to -1:1 + % instead of being in a range of 0-1 AND max-min = 1 + % --- + if features(1).my_params.norm_pre_pca == 1 + + [X, pstd] = mapminmax(X,-1,1); + common.pca.pre_norm = pstd; + elseif features(1).my_params.norm_pre_pca == 2 + + [X, pstd] = mapstd(X,0,1); + common.pca.pre_norm = pstd; + end + + + % --- + % get and apply the principal component analysis + % NOTE: the variance percentile is applied here, too. 
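% ---
% Example of the variance-cutoff logic applied below (a sketch only:
% the matrix size and the 0.95 threshold are illustrative values,
% not taken from this code base):
%
%   X = randn(52, 1000);                 % dim x nclips feature matrix
%   [Y, ps] = processpca(X, 0);          % full PCA rotation, nothing removed yet
%   v = cumsum(var(Y'));                 % cumulative variance over components
%   v = v / max(v);                      % normalise to [0, 1]
%   keep = find(v >= 0.95, 1, 'first');  % cutoff if min_pca_var were 0.95
%
% Only the first 'keep' rows of Y would then be kept when finalising.
% ---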
+ % --- + + [Y, ps] = processpca(X, 0); + common.pca.transform = ps; + + % --- + % get cumulative sum of variance, and decide on cutoff + % point + % --- + v = cumsum(var(Y')); + v = v / max(v); + common.pca.transform.varpart = v; + + if features(1).my_params.min_pca_var > 0 + + min_pca_idx = find(v >= features(1).my_params.min_pca_var,1, 'first'); + + % save into pca structure + common.pca.transform.yrows = min_pca_idx; + + end + + % normalise pca values after processing + if features(1).my_params.norm_post_pca + + [Y,pmm] = mapminmax(Y,0,1); + common.pca.post_norm = pmm; + + end + + % --- + % set common feature values for mixed features + % --- + features(1).my_db.set_common(common); + + % save the transformed features straight away! + features.finalise(Y); + end + + function finalise(feature, final) + % applies a final transformation and + % collects the information of this feature within a single vector + % see info for types in specific dimensions + + % determine size + if feature(1).my_params.max_pca_coeffs > 0 + + max_size = min(feature(1).common.pca.transform.yrows, ... + feature(1).my_params.max_pca_coeffs); + else + + max_size = feature(1).common.pca.transform.yrows; + end + + + % prepare information + info = {'PCA'}; + if isfield(feature(1).common.pca.transform, 'varpart') + info(2:max_size) = num2cell(feature(1).common.pca.transform.varpart(2:max_size)); + else + info(2:max_size) = num2cell(2:max_size); + end + + + % check if features have been finalised already + if nargin == 2 && isempty(final) + + % the final vector etc already are set to zero; + return; + + elseif nargin == 2 && (numel(feature) == size(final, 2)) + + for i = 1:numel(feature) + + % save final vector and description + feature(i).data.final.vector = final(1:max_size,i); + feature(i).data.final.dim = max_size; + feature(i).data.final.vector_info.labels = info; + end + + else + % features have to be transformed first + % --- + % TODO: this code remains untested + % --- + + % check for neccesary parameters + if isempty(feature(1).my_db.commondb) + + error('Define the global transformation first') + return; + end + + + for i = 1:numel(feature) + + % check for neccesary parameters + if isempty(feature(i).my_db.commondb) + + error('Define the global transformation first') + end + + % --- + % get feature vector and apply transformation + % --- + X = feature(i).data.basefeat.vector(); + + % --- + % apply normalisation used for removing mean + % in training data + % --- + if feature(1).my_params.norm_pre_pca == 1 + + X = mapminmax('apply', X, feature(1).common.pca.pre_norm); + elseif feature(1).my_params.norm_pre_pca == 2 + + X = mapstd('apply', X, feature(1).common.pca.pre_norm); + end + + % apply PCA transform + vec = processpca('apply', X, feature(1).common.pca.transform); + + % normalise pca values after transformation + if feature(1).my_params.norm_post_pca + + vec = mapminmax('apply', vec,... + feature(1).common.pca.post_norm); + end + + % --- + % cut vector to final size. 
+ % NOTE: this should be done before + % transformation to reduce computation time + % --- + vec = vec(1:max_size); + + % save final vector and description + feature(i).data.final.vector = vec; + feature(i).data.final.dim = numel(vec); + feature(i).data.final.vector_info.labels = info; + end + end + end + + end +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/MTTMixedFeatureSon.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/MTTMixedFeatureSon.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,213 @@ +classdef MTTMixedFeatureSon < MTTAudioFeature & handle + % --- + % This Class contains + % features are extracted + % as described in Slaney 08 - LEARNING A METRIC FOR MUSIC SIMILARITY + % + % The usual workflow for these features constist of three steps + % 1. extract: extracts the basic single-file dependent features + % 2. define_global_transform: calculates the global feature + % transformation parameters + % 3. finalise: applies the common transformations to a specific feature + % --- + + properties(Constant = true) + + % svn hook + my_revision = str2double(substr('$Rev: 741 $', 5, -1)); + end + + properties + % --- + % Set default parameters + % --- + my_params = struct( ... + 'son_filename','features_rbm_50x1010' ... + ); + end + + % --- + % member functions + % --- + methods + + % --- + % constructor: pointer to feature in database + % --- + function feature = MTTMixedFeatureSon(varargin) + + feature = feature@MTTAudioFeature(varargin{:}); + + end + % --- + % extract feature data from raw audio features + % --- + function data = extract(feature, clip) + % --- + % get features. this includes possible + % local normalisations + % --- + + global globalvars; + global comparison_ids; + global sonfeatbase; + global db_magnaclips; + + if isempty(sonfeatbase); + sonfeatbase = load(feature.my_params.son_filename); + end + % + + % --- + % note: this should reference clip.my_db + % get the actual clip id + idx = db_magnaclips.comparison_ids(clip.id); + + % --- + % NOTE: we just copy everything in a big matrix and then + % normalise the data later + % --- + + if idx <= size(sonfeatbase.nfvec,2) + + % get the vector from loaded data + data.sonraw = sonfeatbase.nfvec(:,idx); + + else + % --- + % CAVE: Clip indices outside + % the range of the supplied mat file + % are filled up with zeros + % --- + data.sonraw = zeros(size(sonfeatbase.nfvec,1),1); + end + + + + data.vector_info = {'Sonfeat'}; + % padd further info struct + data.vector_info(end+1:numel(data.sonraw)) =... + cell(numel(data.sonraw) - numel(data.vector_info) , 1); + + % --- + % prepare field for final features + % --- + data.final.vector = []; + data.final.vector_info = struct(); + data.final.dim = 0; + + % save info data + data.info.type = 'MTTMixedFeatureSon'; + data.info.owner_id = clip.id; + data.info.creatorrev = feature.my_revision; + + % save parameters + data.info.params = feature.my_params; + end + + function define_global_transform(features) + % calculate and set normalization factors from the group of + % input features. These features will be set for the full database + + final = zeros(numel(features(1).data.sonraw), numel(features)); + for i = 1:numel(features) + if ~isempty(features(i).data.sonraw) + final(:,i) = features(i).data.sonraw; + end + end + + % set common to 1 to tell normalisation is done + features(1).my_db.set_common([1]); + + % save the normalised features straight away! 
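% ---
% Typical calling sequence for the extract / define_global_transform /
% finalise workflow described in the class headers (a sketch only:
% the clip ids are invented, and whether the parameter struct can be
% omitted in clip.features() may differ in practice):
%
%   ids = [101 102 103];
%   for k = 1:numel(ids)
%       clips(k) = MTTClip(ids(k));                          % step 0: wrap db entries
%       feats(k) = clips(k).features('MTTMixedFeatureSon');  % step 1: extract / load
%   end
%   feats.define_global_transform();                         % step 2: global normalisation
%   % step 3: for this class, finalise() is called from within
%   %         define_global_transform, directly below.
% ---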
+ features.finalise(final); + end + + + function finalise(features, final) + % applies a final transformation and + % collects the information of this feature within a single vector + % see info for types in specific dimensions + % check if features have been finalised already + + % --- + % set feature labelling + % --- + + info = {}; + + % --- + % construct resulting feature vector out of features + % --- + if nargin == 2 && isempty(final) + + % the final vector etc already are set to zero; + return; + + elseif nargin == 2 && (numel(features) == size(final, 2)) + % the features have already been preassembled + + for i = 1:numel(features) + + % check for neccesary parameters + if isempty(features(i).my_db.commondb) + + error('Define the global transformation first') + return; + end + + features(i).data.final.vector = final(:,i); + features(i).data.final.dim = size(final,1); + + % fill up info struct and append to feature + features(i).data.final.vector_info.labels = ... + features(i).data.vector_info; + end + else + % --- + % if features have been added after gettin gnormalisation + % parameters, ther should be still an option to include + % them + % --- + + for i = 1:numel(features) + + % check for neccesary parameters + if isempty(features(i).my_db.commondb) + + error('Define the global transformation first') + return; + end + + final = zeros(numel(features(1).data.sonraw), numel(features)); + for i = 1:numel(features) + if ~isempty(features(i).data.sonraw) + final(:,i) = features(i).data.sonraw; + end + end + features(i).data.final.vector = final; + features(i).data.final.dim = size(final,1); + + % fill up info struct and append to feature + features(i).data.final.vector_info.labels = ... + features(i).data.vector_info; + end + + end + + % --- + % TODO: Maybe delete more basic features again at this point? + % --- + end + + % --- + % destructor: do we really want to remove this + % from the database? No, but + % TODO: create marker for unused objects in db, and a cleanup + % function + % --- + function delete(feature) + + end + end +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/MTTMixedFeatureStober11.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/MTTMixedFeatureStober11.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,273 @@ +classdef MTTMixedFeatureStober11 < MTTAudioFeature & handle + % --- + % This Class contains + % features are extracted + % as described in Slaney 08 - LEARNING A METRIC FOR MUSIC SIMILARITY + % + % The usual workflow for these features constist of three steps + % 1. extract: extracts the basic single-file dependent features + % 2. define_global_transform: calculates the global feature + % transformation parameters + % 3. finalise: applies the common transformations to a specific feature + % --- + + properties(Constant = true) + + % svn hook + my_revision = str2double(substr('$Rev$', 5, -1)); + end + + properties + % --- + % Set default parameters + % --- + my_params = struct(... + 'stob_lowaudio', 1, ... + 'stob_highaudio', 1, ... % + 'stob_tags', 1, ... % + 'stob_norm', 1 ... + ); + end + + % --- + % member functions + % --- + methods + + % --- + % constructor: pointer to feature in database + % --- + function feature = MTTMixedFeatureStober11(varargin) + + feature = feature@MTTAudioFeature(varargin{:}); + + end + % --- + % extract feature data from raw audio features + % --- + function data = extract(feature, clip) + % --- + % get features. 
this includes possible + % local normalisations + % --- + + global globalvars; + global stobbase; + + if isempty(stobbase); + stobbase = load('features_stober'); + end + + % --- + % NOTE: we define feature sets which are included / + % excluded according to the specified parameters + % --- + + lowAudio = {'pitchMean', 'pitchSdev', 'timbreMean', 'timbreSdev'}; + + % highAudio features more or less correspond to slaney 08 features + hiAudio = {'energy','key','loudness','timeSignature',... + 'danceability', 'mode', 'tempo'}; + + metadat = {'tags'}; + + allowedFeat = {}; + + % Select the features to keep + if feature(1).my_params.stob_lowaudio + allowedFeat = {allowedFeat{:}, lowAudio{:}}; + end + if feature(1).my_params.stob_highaudio + allowedFeat = {allowedFeat{:}, hiAudio{:}}; + end + if feature(1).my_params.stob_tags + allowedFeat = {allowedFeat{:}, metadat{:}}; + end + + % get the actual clip id + idx = find(stobbase.clipIds == clip.id); + + % --- + % NOTE: we just copy everything in a big matrix and then + % normalise the data later + % --- + data.vector_info = {}; + data.stobraw = []; + fields = fieldnames(stobbase); + for i = 1:numel(fields) + + % skip clip ID field + if strcmp(fields{i},'clipIds'); + continue; + end + + % skip unwanted features + if isempty(strcellfind(allowedFeat, fields{i})) + continue; + end + + % --- + % TODO: special case for tag features, including + % the tag names + % --- + + % put field info into right position + data.vector_info{numel(data.stobraw)+1} = fields{i}; + + % add data to feature + if size(stobbase.(fields{i}),1) == 1 + data.stobraw(end+1) = stobbase.(fields{i})(idx); + else + % concatenate vector + tmpdat = stobbase.(fields{i})(idx,:); + data.stobraw(end+1:end+numel(tmpdat)) = tmpdat; + end + end + % padd further info struct + data.vector_info(end+1:numel(data.stobraw)) =... + cell(numel(data.stobraw) - numel(data.vector_info) , 1); + + % --- + % prepare field for final features + % --- + data.final.vector = []; + data.final.vector_info = struct(); + data.final.dim = 0; + + % save info data + data.info.type = 'MTTMixedFeatureStober11'; + data.info.owner_id = clip.id; + data.info.creatorrev = feature.my_revision; + + % save parameters + data.info.params = feature.my_params; + end + + function define_global_transform(features) + % calculate and set normalization factors from the group of + % input features. These features will be set for the full database + + final = zeros(numel(features(1).data.stobraw), numel(features)); + for i = 1:numel(features) + if ~isempty(features(i).data.stobraw) + final(:,i) = features(i).data.stobraw'; + end + end + + if features(1).my_params.stob_norm + if numel(features) == 1 + error ('Insert feature array for this method, or set normalisation to 0'); + end + + % --- + % here, we only need to define the post-normalisation + % --- + [final, pstd] = mapminmax(final,0,1); + common.stobstats.post_norm = pstd; + + % --- + % NOTE: whitening as in slaney?? + % Would make reading the + % mahal matrices really hard + % --- + + features(1).my_db.set_common(common); + + else + + features(1).my_db.set_common([1]); + end + + % save the normalised features straight away! 
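% ---
% mapminmax sketch (toy numbers, for illustration only): each row of a
% dim x nclips matrix is rescaled independently, and the returned
% settings can be re-applied to later data with the 'apply' syntax,
% as done above for common.stobstats.post_norm and again in finalise():
%
%   A = [1 2 3; 10 20 30];
%   [An, ps] = mapminmax(A, 0, 1);              % An = [0 0.5 1; 0 0.5 1]
%   a_new = mapminmax('apply', [2; 20], ps);    % a_new = [0.5; 0.5]
% ---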
+ features.finalise(final); + end + + + function finalise(features, final) + % applies a final transformation and + % collects the information of this feature within a single vector + % see info for types in specific dimensions + % check if features have been finalised already + + % --- + % set feature labelling + % --- + + info = {}; + + % --- + % construct resulting feature vector out of features + % --- + if nargin == 2 && isempty(final) + + % the final vector etc already are set to zero; + return; + + elseif nargin == 2 && (numel(features) == size(final, 2)) + % the features have already been preassembled + + for i = 1:numel(features) + + % check for neccesary parameters + if isempty(features(i).my_db.commondb) + + error('Define the global transformation first') + return; + end + + features(i).data.final.vector = final(:,i); + features(i).data.final.dim = size(final,1); + + % fill up info struct and append to feature + features(i).data.final.vector_info.labels = ... + features(i).data.vector_info; + end + else + % --- + % if features have been added after gettin gnormalisation + % parameters, ther should be still an option to include + % them + % --- + + for i = 1:numel(features) + + % check for neccesary parameters + if isempty(features(i).my_db.commondb) + + error('Define the global transformation first') + return; + end + + final = features(i).data.stobraw'; + + if features(1).my_params.stob_norm == 1 + + [final] = mapminmax('apply', final, features(1).common.stobstats.post_norm); + end + + features(i).data.final.vector = final; + features(i).data.final.dim = size(final,1); + + % fill up info struct and append to feature + features(i).data.final.vector_info.labels = ... + features(i).data.vector_info; + end + + end + + % --- + % TODO: Maybe delete more basic features again at this point? + % --- + end + + % --- + % destructor: do we really want to remove this + % from the database? No, but + % TODO: create marker for unused objects in db, and a cleanup + % function + % --- + function delete(feature) + + end + end +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/MTTMixedFeatureStober11Genre.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/MTTMixedFeatureStober11Genre.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,182 @@ +classdef MTTMixedFeatureStober11Genre < MTTAudioFeature & handle + % --- + % + % The usual worklow for these features constist of three steps + % 1. extract: extracts the basic single-file dependent features + % 2. define_global_transform: calculates the global feature + % transformation parameters + % 3. finalise: applies the common transformations to a specific feature + % --- + + properties(Constant = true) + + % svn hook + my_revision = str2double(substr('$Rev$', 5, -1)); + end + + properties + % --- + % Set default parameters + % --- + my_params = struct(... + ... % --- + ... % these are Stober11 parameters + ... % --- + 'stob_lowaudio', 1, ... + 'stob_highaudio', 1, ... % + 'stob_tags', 1, ... + 'stob_norm', 1, ... + ... % --- + ... % following are GenreBasic parameters + ... % --- + 'pct_genres', 1, ... % 1/100 percentile genre tags used + 'empty_genres', 1 ... 
% allow empty genres to persist + ); + end + + % --- + % member functions + % --- + methods + + % --- + % constructor: pointer to feature in database + % --- + function feature = MTTMixedFeatureStober11Genre(varargin) + + feature = feature@MTTAudioFeature(varargin{:}); + end + + % --- + % extract feature data by combining genre adn stob features + % --- + function data = extract(feature, clip) + + % --- + % get Basic Summary audio features. this includes possible + % local normalisations + % --- + stob = clip.features('MTTMixedFeatureStober11',feature.my_params); + + % --- + % get genre tag features + % --- + + genrebasic = clip.genre_features_basic(feature.my_params); + + % save to features data field + data.stob = stob; + data.tags = genrebasic; + + % prepare field for final features + data.final.vector = []; + data.final.vector_info = struct(); + data.final.dim = 0; + + % save info data + data.info.type = 'MTTMixedFeatureStober11Genre'; + data.info.owner_id = clip.id; + data.info.creatorrev = feature.my_revision; + + % save parameters + data.info.params = feature.my_params; + end + + function define_global_transform(features) + % calculate and set normalization factors from the group of + % input features. These features will be set for the full database + + if numel(features) == 1 + error ('Insert feature array for this method'); + end + + % --- + % We collect all the relevant stob + % features and get the transform on this basis. + % --- + for i = 1:numel(features) + stob(i) = features(i).data.stob; + end + + % call the features own transsform function + stob.define_global_transform(); + + % --- + % We collect all the relevant genretag + % features and get the transform on this basis. + % --- + for i = 1:numel(features) + genrebasic(i) = features(i).data.tags; + end + + % call the features own transsform function + genrebasic.define_global_transform(); + + % --- + % set common feature values for mixed features + % --- + features(1).my_db.set_common([1]); %trivial common + end + + + function finalise(feature) + % applies a final transformation and collects the + % information of this feature within a single vector + % see info for types in specific dimensions + + for i = 1:numel(feature) + + % check for neccesary parameters + if isempty(feature(i).my_db.commondb) + + error('Define the global transformation first'); + end + + % --- + % finalise audio feature and get vector + % --- + stob = feature(i).data.stob; + stob.finalise(); + + % finalise tag features + genrebasic = feature(i).data.tags; + genrebasic.finalise; + + % --- + % final data assembly + % --- + + % concatenate vectors + feature(i).data.final.vector = ... + [stob.vector() ; genrebasic.vector()]; + + % add feature dimensions + feature(i).data.final.dim = stob.dim + genrebasic.dim; + + % concatenate labels + feature(i).data.final.vector_info.labels = ... + {stob.data.final.vector_info.labels{:}, ... + genrebasic.data.final.vector_info.labels{:}}; + end + end + + % --- + % destructor: do we really want to remove this + % from the database? 
No, but + % TODO: create marker for unused objects in db, and a cleanup + % function + % --- + function delete(feature) + + end + + function visualise(feature) + % --- + % plots the different data types collected in this feature + % --- + for i = 1:numel(feature) + clip = MTTClip(feature(i).owner_id()); + end + end + end +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/MTTMixedFeatureStober11GenrePCA.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/MTTMixedFeatureStober11GenrePCA.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,264 @@ +% -- +% This class loads and hanles the aufdio features included with the MTT +% Library +% --- +classdef MTTMixedFeatureStober11GenrePCA < MTTAudioFeature & handle + + properties(Constant = true) + + my_revision = str2double(substr('$Rev: 457 $', 5, -1)); + end + + properties + + % --- + % Set default parameters + % --- + my_basetype = 'MTTMixedFeatureStober11Genre'; + + my_params = MTTAudioFeature.inherited_params(... + 'MTTMixedFeatureStober11Genre', ... + 'min_pca_var', 0, ... % [0-1], def. 0 fraction of variance to keep + 'max_pca_coeffs', 0, ...% max. number of final coefficients + 'norm_pre_pca', 1, ... + 'norm_post_pca', 1 ...% normalise pca coefficients after transformation + ); + end + % --- + % member functions + % --- + methods + + % --- + % constructor: pointer to feature in database + % --- + function feature = MTTMixedFeatureStober11GenrePCA(varargin) + + feature = feature@MTTAudioFeature(varargin{:}); + end + + % --- + % load feature data from xml file + % --- + function data = extract(feature, clip) + % load feature data by parsing xml + + global globalvars; + + % --- + % we extract the base features, and save + % the pointers to these. + % the main work is then done in the define_global_transf + % and finalise functions. + % --- + data.basefeat = clip.features(feature.my_basetype,... + feature.my_params); + + % save info data + data.info.type = class(feature); + data.info.owner_id = clip.id; + data.info.creatorrev = feature.my_revision; + + % save param data + data.info.params = feature.my_params; + + % prepare field for final features + data.final.vector = []; + data.final.dim = 0; + data.final.vector_info.labels = {}; + end + + function define_global_transform(features) + + if numel(features) == 1 + error ('Insert feature array for this method'); + end + + % --- + % We collect all the relevant genretag + % features and get the transform on this basis. + % --- + for i = 1:numel(features) + basef(i) = features(i).data.basefeat; + end + + % call the features own transsform function + basef.define_global_transform(); + + % --- + % finalise the basic features, and + % get the feature vectors; + % --- + X = basef.vector(); + + % check dataset dimension + if numel(features) < basef.dim; + + error ('Not enough feature vectors for PCA calculation. need %d samples', ... + basef.dim); + end + + % --- + % NOTE: should the data be normalised and scaled to -1:1 + % instead of being in a range of 0-1 AND max-min = 1 + % --- + if features(1).my_params.norm_pre_pca == 1 + + [X, pstd] = mapminmax(X,-1,1); + common.pca.pre_norm = pstd; + elseif features(1).my_params.norm_pre_pca == 2 + + [X, pstd] = mapstd(X,0,1); + common.pca.pre_norm = pstd; + end + + + % --- + % get and apply the principal component analysis + % NOTE: the variance percentile is applied here, too. 
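% ---
% The two norm_pre_pca modes selected above differ as follows
% (toy data, for illustration only):
%
%   A = [1 2 3; 4 8 12];
%   mapminmax(A, -1, 1);   % mode 1: each dimension rescaled to the range [-1, 1]
%   mapstd(A, 0, 1);       % mode 2: each dimension shifted and scaled to mean 0, std 1
%
% In both cases the returned settings end up in common.pca.pre_norm, so
% the identical mapping can be re-applied to single clips in finalise()
% via the 'apply' form of the same function.
% ---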
+ % --- + + [Y, ps] = processpca(X, 0); + common.pca.transform = ps; + + % --- + % get cumulative sum of variance, and decide on cutoff + % point + % --- + v = cumsum(var(Y')); + v = v / max(v); + common.pca.transform.varpart = v; + + if features(1).my_params.min_pca_var > 0 + + min_pca_idx = find(v >= features(1).my_params.min_pca_var,1, 'first'); + + % save into pca structure + common.pca.transform.yrows = min_pca_idx; + + end + + % normalise pca values after processing + if features(1).my_params.norm_post_pca + + [Y,pmm] = mapminmax(Y,0,1); + common.pca.post_norm = pmm; + + end + + % --- + % set common feature values for mixed features + % --- + features(1).my_db.set_common(common); + + % save the transformed features straight away! + features.finalise(Y); + end + + function finalise(feature, final) + % applies a final transformation and + % collects the information of this feature within a single vector + % see info for types in specific dimensions + + % determine size + if feature(1).my_params.max_pca_coeffs > 0 + + max_size = min(feature(1).common.pca.transform.yrows, ... + feature(1).my_params.max_pca_coeffs); + else + + max_size = feature(1).common.pca.transform.yrows; + end + + + % prepare information + info = {'PCA'}; + if isfield(feature(1).common.pca.transform, 'varpart') + info(2:max_size) = num2cell(feature(1).common.pca.transform.varpart(2:max_size)); + else + info(2:max_size) = num2cell(2:max_size); + end + + + % check if features have been finalised already + if nargin == 2 && isempty(final) + + % the final vector etc already are set to zero; + return; + + elseif nargin == 2 && (numel(feature) == size(final, 2)) + + for i = 1:numel(feature) + + % save final vector and description + feature(i).data.final.vector = final(1:max_size,i); + feature(i).data.final.dim = max_size; + feature(i).data.final.vector_info.labels = info; + end + + else + % features have to be transformed first + % --- + % TODO: this code remains untested + % --- + + % check for neccesary parameters + if isempty(feature(1).my_db.commondb) + + error('Define the global transformation first') + return; + end + + + for i = 1:numel(feature) + + % check for neccesary parameters + if isempty(feature(i).my_db.commondb) + + error('Define the global transformation first') + end + + % --- + % get feature vector and apply transformation + % --- + X = feature(i).data.basefeat.vector(); + + % --- + % apply normalisation used for removing mean + % in training data + % --- + if feature(1).my_params.norm_pre_pca == 1 + + X = mapminmax('apply', X, feature(1).common.pca.pre_norm); + elseif feature(1).my_params.norm_pre_pca == 2 + + X = mapstd('apply', X, feature(1).common.pca.pre_norm); + end + + % apply PCA transform + vec = processpca('apply', X, feature(1).common.pca.transform); + + % normalise pca values after transformation + if feature(1).my_params.norm_post_pca + + vec = mapminmax('apply', vec,... + feature(1).common.pca.post_norm); + end + + % --- + % cut vector to final size. 
+ % NOTE: this should be done before + % transformation to reduce computation time + % --- + vec = vec(1:max_size); + + % save final vector and description + feature(i).data.final.vector = vec; + feature(i).data.final.dim = numel(vec); + feature(i).data.final.vector_info.labels = info; + end + end + end + + end +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/MTTMixedFeatureStober11Slaney08GenreBasicSm.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/MTTMixedFeatureStober11Slaney08GenreBasicSm.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,178 @@ +classdef MTTMixedFeatureStober11Slaney08GenreBasicSm < MTTAudioFeature & handle + % --- + % + % The usual worklow for these features constist of three steps + % 1. extract: extracts the basic single-file dependent features + % 2. define_global_transform: calculates the global feature + % transformation parameters + % 3. finalise: applies the common transformations to a specific feature + % --- + + properties(Constant = true) + + % svn hook + my_revision = str2double(substr('$Rev: 638 $', 5, -1)); + end + + properties + % --- + % Set default parameters + % --- + my_params = MTTAudioFeature.inherited_params(... + 'MTTMixedFeatureSlaney08GenreBasicSm', ... + ... % --- + ... % these are Stober11 parameters + ... % --- + 'stob_lowaudio', 1, ... + 'stob_highaudio', 1, ... % + 'stob_tags', 1, ... + 'stob_norm', 1 ... + ... % --- + ); + end + + % --- + % member functions + % --- + methods + + % --- + % constructor: pointer to feature in database + % --- + function feature = MTTMixedFeatureStober11Slaney08GenreBasicSm(varargin) + + feature = feature@MTTAudioFeature(varargin{:}); + end + + % --- + % extract feature data by combining genre adn stob features + % --- + function data = extract(feature, clip) + + % --- + % get Basic Summary audio features. this includes possible + % local normalisations + % --- + stob = clip.features('MTTMixedFeatureStober11',feature.my_params); + + % --- + % get genre tag features + % --- + genrebasicsm = clip.features('MTTMixedFeatureSlaney08GenreBasicSm',feature.my_params); + + % save to features data field + data.stob = stob; + data.tags = genrebasicsm; + + % prepare field for final features + data.final.vector = []; + data.final.vector_info = struct(); + data.final.dim = 0; + + % save info data + data.info.type = 'MTTMixedFeatureStober11Slaney08GenreBasicSm'; + data.info.owner_id = clip.id; + data.info.creatorrev = feature.my_revision; + + % save parameters + data.info.params = feature.my_params; + end + + function define_global_transform(features) + % calculate and set normalization factors from the group of + % input features. These features will be set for the full database + + if numel(features) == 1 + error ('Insert feature array for this method'); + end + + % --- + % We collect all the relevant stob + % features and get the transform on this basis. + % --- + for i = 1:numel(features) + stob(i) = features(i).data.stob; + end + + % call the features own transsform function + stob.define_global_transform(); + + % --- + % We collect all the relevant genretag + % features and get the transform on this basis. 
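% ---
% The same delegation pattern recurs in all of these mixed features
% (sketch; 'sub' is just a placeholder name): the relevant sub-feature
% objects are gathered into one array, and the transform method is then
% called once on that array, e.g.
%
%   for i = 1:numel(features)
%       sub(i) = features(i).data.tags;
%   end
%   sub.define_global_transform();
%
% Because the feature classes are handle classes, the collected elements
% are references to the original sub-features, so the computed
% normalisation is not lost on a copy.
% ---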
+ % --- + for i = 1:numel(features) + genrebasicsm(i) = features(i).data.tags; + end + + % call the features own transsform function + genrebasicsm.define_global_transform(); + + % --- + % set common feature values for mixed features + % --- + features(1).my_db.set_common([1]); %trivial common + end + + + function finalise(feature) + % applies a final transformation and collects the + % information of this feature within a single vector + % see info for types in specific dimensions + + for i = 1:numel(feature) + + % check for neccesary parameters + if isempty(feature(i).my_db.commondb) + + error('Define the global transformation first'); + end + + % --- + % finalise audio feature and get vector + % --- + stob = feature(i).data.stob; + stob.finalise(); + + % finalise tag features + genrebasicsm = feature(i).data.tags; + genrebasicsm.finalise(); + + % --- + % final data assembly + % --- + + % concatenate vectors + feature(i).data.final.vector = ... + [stob.vector() ; genrebasicsm.vector()]; + + % add feature dimensions + feature(i).data.final.dim = stob.dim + genrebasicsm.dim; + + % concatenate labels + feature(i).data.final.vector_info.labels = ... + {stob.data.final.vector_info.labels{:}, ... + genrebasicsm.data.final.vector_info.labels{:}}; + end + end + + % --- + % destructor: do we really want to remove this + % from the database? No, but + % TODO: create marker for unused objects in db, and a cleanup + % function + % --- + function delete(feature) + + end + + function visualise(feature) + % --- + % plots the different data types collected in this feature + % --- + for i = 1:numel(feature) + clip = MTTClip(feature(i).owner_id()); + end + end + end +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/MTTRandomFeature.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/MTTRandomFeature.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,71 @@ +% -- +% This class loads and hanles the aufdio features included with the MTT +% Library +% --- +classdef MTTRandomFeature < MTTAudioFeature & handle + + properties(Constant = true) + + my_revision = str2double(substr('$Rev: 167 $', 5, -1)); + end + + properties + + % --- + % Set default parameters + % --- + my_params = struct(... + 'nrandoms', 100 ... 
+ ); + end + % --- + % member functions + % --- + methods + + % --- + % constructor: pointer to feature in database + % --- + function feature = MTTRandomFeature(varargin) + + feature = feature@MTTAudioFeature(varargin{:}); + end + + % --- + % load feature data from xml file + % --- + function data = extract(feature, clip) + % load feature data by parsing xml + + global globalvars; + + % fprintf('parsing features for clip %d \n',clip.id()); + + % parse feature + data.random = rand(feature.my_params.nrandoms, 1); + + % save info data + data.info.type = 'MTTRandomFeature'; + data.info.owner_id = clip.id; + data.info.creatorrev = feature.my_revision; + + % save param data + data.info.params = feature.my_params; + + % prepare field for final features + data.final.vector = data.random; + data.final.dim = numel(data.final.vector); + + info = {'random'}; + info(2:data.final.dim) = num2cell(2:data.final.dim); + data.final.vector_info.labels = info; + end + + function visualise(feature) + % --- + % plots the different data types collected in this feature + % --- + + end + end +end diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/MTTTagFeatureGenreBasic.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/MTTTagFeatureGenreBasic.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,204 @@ +classdef MTTTagFeatureGenreBasic < MTTAudioFeature & handle + + + properties(Constant = true) + + my_revision = str2double(substr('$Rev: 99 $', 5, -1)); + end + + % --- + % here come the internal clip properties. + % the database is stored as a global variable + % --- + properties + + my_params = struct(... + ... % --- + ... % these are GenreBasic parameters + ... % --- + 'pct_genres', 1, ... % 1/100 percentile genre tags used + 'empty_genres', 0 ... % allow empty genres to persist + ); + end + + methods + + % --- + % constructor: pointer to feature in database + % --- + function feature = MTTTagFeatureGenreBasic(varargin) + + feature = feature@MTTAudioFeature(varargin{:}); + end + + function data = extract(feature, clip) + % process tag information and build the tag vector + + % --- + % get total number of tags. + % + % NOTE: the highest tag id should match the size of the + % tagdb lexicon + % --- +% num_tags = clip.my_db.genredb.size(); + + [tagids, score] = clip.my_db.genredb.annotids_for_owner(clip.id); + + % save lexicon + data.lexicon = clip.my_db.genredb.lexicon; + + % save to data structure + data.tags.ids = tagids; + data.tags.scores = score; + + % save info data + data.info.type = 'MTTTagFeatureGenreBasic'; + data.info.owner_id = clip.id; + data.info.creatorrev = feature.my_revision; + + % save parameters + data.info.params = feature.my_params; + + data.final.vector = []; + data.final.vector_info = struct(); + data.final.dim = 0; + end + + + % --- + % NOTE: the following transforms are just stated fro + % future use by now + % --- + function define_global_transform(features) + % --- + % compute the relevant tags, and save + % them in the commom place + % --- + + % compute extreme cases + if features(1).my_params.pct_genres == 1 && features(1).my_params.empty_genres + % all tags allowed + + common.rel_dimensions.tags.ids = 1:numel(features(1).data.lexicon); + common.rel_dimensions.tags.id_pos = 1:numel(features(1).data.lexicon); + + % set common feature values + features(1).my_db.set_common(common); + return; + + elseif features(1).my_params.pct_genres == 0 + % no tags, f.e. 
for parameter experiments in + % higher level features + + common.rel_dimensions.tags.ids = []; + common.rel_dimensions.tags.id_pos = []; + + % set common feature values + features(1).my_db.set_common(common); + return; + end + + + allids = sparse(numel(features), numel(features(1).data.lexicon)); + for i = 1:numel(features) + + allids(i,features(i).data.tags.ids) = ... + allids(i,features(i).data.tags.ids) + features(i).data.tags.scores; + end + + % --- + % get usage of tags and filter not-used tags + % --- + tagsum = sum(allids, 1); + nonzero = tagsum > 0; + + % --- + % NOTE: We remove the empty genres, then + % sort by weight / number of appearance and cut off + ids = find(nonzero); + [null, idx] = sort(tagsum(ids),'descend'); + ids = ids(idx); + + % cutoff + num_tags = ceil( features(1).my_params.pct_genres * numel(ids)); + valid_ids = ids(1:min(end, num_tags)); + + % --- + % NOTE: make sure that the positions for the tags + % stay correctly assigned / consistent + % --- + id_pos = sparse(size(valid_ids)); + id_pos(valid_ids) = 1:num_tags; + + % save to common data structure + common.rel_dimensions.tags.ids = valid_ids; + common.rel_dimensions.tags.id_pos = id_pos; + + % set common feature values + features(1).my_db.set_common(common); + end + + function finalise(features) + + for i = 1:numel(features) + + % check for neccesary parameters + if isempty(features(i).my_db.commondb) + + error('Define the global transformation first') + return; + end + + % get valid tag ids + valid_ids = features(i).common.rel_dimensions.tags.ids; + num_tags = numel(valid_ids); + + % --- + % get vector positions for contined ids, + % and sort out the non_allowed id's + % --- + id_pos = features(i).common.rel_dimensions.tags.id_pos; + + [has_ids, has_pos] = intersect(features(i).data.tags.ids, valid_ids); + + score = features(i).data.tags.scores; + + % create feature vector + vector = zeros(num_tags,1); + vector(id_pos(has_ids)) = score(has_pos); + + % --- + % NOTE: this labelling costs to much space + % --- + vector_info.labels = features(i).data.lexicon(valid_ids); + + % save into feature struct + features(i).data.final.dim = num_tags; + features(i).data.final.vector = vector; + features(i).data.final.vector_info = vector_info; + end + end + + function [a1] = visualise(feature) + + % get tag descriptions form first clip + tags = MTTClip(feature(1).owner_id()).my_db.genres; + + % works for multiple feature instances + for i = 1:numel(feature) + + clip = MTTClip(feature(i).owner_id()); + + % plot feature data + h = bar(feature(i).data.final.vector); + a1 = gca; + + set(a1,'XTick', 1:feature(i).data.final.dim,... + 'XTickLabel', tags); + title(sprintf('clip %d: %s by %s, genres', ... 
+ clip.id, clip.title(),clip.artist())); + end + end + + end +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/album_artist_magnatunedb_correspondence.mat Binary file core/magnatagatune/album_artist_magnatunedb_correspondence.mat has changed diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/annotations_final.mat Binary file core/magnatagatune/annotations_final.mat has changed diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/clip_info_final.mat Binary file core/magnatagatune/clip_info_final.mat has changed diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/compared_cyclefinder_stober.mat Binary file core/magnatagatune/compared_cyclefinder_stober.mat has changed diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/comparisons_final.mat Binary file core/magnatagatune/comparisons_final.mat has changed diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/db.mat Binary file core/magnatagatune/db.mat has changed diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/features_stober.mat Binary file core/magnatagatune/features_stober.mat has changed diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/fm_corresponding_albums.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/fm_corresponding_albums.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,47 @@ +function [fmalbums, fmalbum_names] = fm_corresponding_albums(albums) +%fmalbums = fm_corresponding_albums(albums) +% +% searches last.fm for the artist given in a cell +% array of strings and saves the first hit into the output. +% +% fmalbum_names = {magnatagatune_artist, fm_artist}; +% +% returns -1 of no matches found +maxtry = 3; + +albums = unique(albums); +i = 1; +numtry = 0; +while i < numel(albums) + + % --- + % as sometimes this fails due to connection problems + % or other problems not yet identified, we TRY + % + % for fails, we may try one time again! + % --- + fprintf('%d percent: %s\n',floor(i*100/numel(albums)),char(albums{i})); + try + [tmp, tmpid] = fm_retrieve_album(char(albums{i})); + fmalbums(i,:) = {albums{i}, tmp{1}, tmpid{1}}; + catch err + + % no work :( + warning(err.message); + fmalbums{i} = '-1'; + + numtry = numtry + 1; + + % --- + % NOTE: we try again in case it fails for < maxtry times + % --- + if numtry < maxtry + i = i - 1; + else + numtry = 0; + end + end + i = i + 1; +end + +fmalbum_names = {'magnatagatune_album', 'fm_album', 'mbid'}; diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/fm_corresponding_artists.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/fm_corresponding_artists.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,45 @@ +function [fmartist_name, fmartist_name_names] = fm_corresponding_artists(artists) +%[artists, fmartists] = fm_corresponding_artists(artists) +% +% searches last.fm for the artist given in a cell +% array of strings and saves the first hit into the output. +% +% returns -1 of no matches found +maxtry = 10; + +% artists = unique(artists); +i = 1; +numtry = 0; +while i <= numel(artists) + + % --- + % as sometimes this fails due to connection problems + % or other problems not yet identified, we TRY + % + % for fails, we may try one time again! 
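+    %
+    % descriptive note on the retry logic implemented below: on a
+    % failed last.fm request we store a '-1' placeholder, increment
+    % numtry and decrement i, so the same artist is queried again on
+    % the next pass of the loop; after maxtry consecutive failures
+    % numtry is reset and the loop moves on, keeping the placeholder.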
+ % --- + fprintf('%d percent: %s\n',floor(i*100/numel(artists)),char(artists{i})); + try + [tmp, tmpid] = fm_retrieve_album(char(artists{i})); + fmartist_name(i,:) = {artists{i}, tmp{1}, tmpid{1}}; + catch err + + % no work :( + warning(err.message); + fmartist_name(i,:) = {artists{i}, '-1', '-1'}; + + numtry = numtry + 1; + + % --- + % NOTE: we try gain in case it fails for < maxtry times + % --- + if numtry < maxtry + i = i - 1; + else + numtry = 0; + end + end + i = i + 1; +end + +fmartist_name_names = {'magnatagatune_album', 'fm_album', 'mbid'}; \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/genre_stats.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/genre_stats.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,58 @@ +function out = genre_stats(tagged, names, freqs, childof) +% out = genre_stats(names, freqs, childof) +% +% calculates statistics for genre distributions +% +% + +% get overall genre frequency and sort accordingly +allapp = sum(freqs,2); +allapp = allapp/max(allapp); + +[null, idx] = sort(allapp,'descend'); + +% get root potential +rootpot = 1 - sum(childof,2); + +figure; +bar(1:numel(names),[allapp(idx) rootpot(idx)]) +set(gca,'XTick',1:numel(names)); +set(gca,'XTickLabel',names(idx)); +legend('#appearances','root genre possibility'); +title 'genre statistics sorted by frequency of appearances' + +% --- +% determine genres that include x% of the whole dataset +% --- +pctl = 0.98; % 80 percent included + +% --- +% re-sort by appearance and root potential. +% using the multiplication, we can filter out subgenres +% --- +[null, idxrt] = sort(rootpot.*allapp,'descend'); + +% iteratively add 'best' genre according to root potential +gotclips = []; +numclips = []; +num_included = 0; +i = 1; +while i <= numel(names) && num_included < pctl * length(tagged) + + % count clips found for this genre + fprintf('%s \n', char(names{idxrt(i)})); + newclips = setdiff(find(tagged(:,idxrt(i)))', gotclips); + + gotclips = [gotclips newclips]; + numclips(i) = numel(newclips); + + num_included = num_included + numclips(i); + i = i + 1; +end + +figure; +pie(numclips(numclips > 0) / length(tagged)); +legend(names{idxrt(numclips > 0)}); + +out = []; + diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/get_comparison_stats.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/get_comparison_stats.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,136 @@ +function [num_compares] = get_comparison_stats() +% [num_compares] = get_comparison_stats() +% +% - retrieves number of comparisons for each pair of data clips +% - extracts genre associations and statistics + +global db_magnaclips; +global comparison; +global comparison_ids; + +num_compares = sparse(numel(comparison_ids), numel(comparison_ids)); + +for i = 1:size(comparison,1) + abc = sort(comparison(i,1:3)); + a = abc(1); + b = abc(2); + c = abc(3); + num_compares(a,b) = num_compares(a,b) +1; + num_compares(b,c) = num_compares(b,c) +1; + num_compares(a,c) = num_compares(a,c) +1; +end + +% --- +% check for repeated pairs +% --- +% [i,j] = ind2sub(size(num_compares), find(num_compares > 1)) +% num = 300; +% a = find((comparison(:,1) == i(num) | comparison(:,2) == i(num) | comparison(:,3) == i(num))...) 
+% & (comparison(:,1) == j(num) | comparison(:,2) == j(num) | comparison(:,3) == j(num))) +% +% comparison(a,:) + + +% ------------------------------------------------------------ +% compare genre frequency +% --- +clips = MTTClip(comparison_ids); + +% comparison genres +[genres, scores, gid] = clips.genres(); + +% all genres +[genres2, scores2, gid2] = db_magnaclips.genredb.stats; + +% --- +% plot top genres +% --- + +figure; +bar(scores(1:20)) +set(gca, 'XTick',1:20,'XTickLabel',genres(1:20)) + +figure; +subplot(2,1,1) +bar(scores(1:10) / max(scores)) +set(gca, 'XTickLabel',genres) +title 'comparison subset' + +subplot(2,1,2) +bar(scores2(1:10) / max(scores2)) +set(gca, 'XTickLabel',genres2,'FontSize', 8) +title 'Full MTT data set' + +% --- +% evaluate differences in distribution for all genres +% shown are changes in relation to the whole database(genres2) +% ( relative to the summed tags ) +% --- +genrediff = zeros(numel(genres2),1); +for i=1:numel(genres2) + +% mgen = strcellfind(genres, genres2(i)); + mgen = find(gid == gid2(i)); + + if ~isempty(mgen) + % genrediff(i,1) = scores(mgen)/sum(scores) - scores2(i)/sum(scores2); + genrediff(i,1) = 1 - ( (scores2(i)/sum(scores2)) / (scores(mgen)/sum(scores))); + + else +% genrediff(i) = inf; + error 'genre ids not consistent' + end +end +genrediff = genrediff * 100; + +% visualise difference +figure +bar(genrediff); +set(gca, 'XTick',1:44,'XTickLabel',genres2) +axis([0 45 -200 100]); +title 'relative loss for each genre considering comparison as an excerpt from MTT' +ylabel 'loss(%)' + +% --- +% get distinc genre music sets: +% we try the following sets of 'similar' genre tags +% --- +cgdb = db_magnaclips.genredb.subset(clips.id); + +cids = cgdb.owner({'Classical', 'Baroque'}, 'or'); +cgdb = cgdb.exclude(cids); + +eids = cgdb.owner({'Electronica', 'New Age', 'Ambient'}, 'or'); +cgdb = cgdb.exclude(eids); + +rids = cgdb.owner({'Rock', 'Alt Rock', 'Hard Rock', 'Metal'}, 'or'); +cgdb = cgdb.exclude(rids); + +% get reverse indexing for comparisons +rev_compid = sparse(comparison_ids, 1, 1:numel(comparison_ids)); + +% 8 triples entirely classical +cfit = get_comparison_linfits(comparison, rev_compid(cids)); + +% 43 triples entirely electronic +efit = get_comparison_linfits(comparison, rev_compid(eids)); + +% 6 triples entirely rock +rfit = get_comparison_linfits(comparison, rev_compid(rids)); + +cgdb.stats + +end +% this function returns for each comparison line +% the num. of appearance of given selection of clip ids +function out = get_comparison_linfits(comparison, goodset) + + out = zeros(1, size(comparison,1)); + % for each line + for i = 1:size(comparison,1) + + out(i) = numel(intersect(comparison(i,1:3), goodset')); + end +end + + diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/get_magnagenre_numeric.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/get_magnagenre_numeric.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,108 @@ +function [clip_magnagenres, magnagenres, magnagenre_freqs, magnagenre_childof] = get_magnagenre_numeric(clip_info_magnagenres); +% +% uses the clip_info magnagenres_final db to get a numerical genre +% representation ofthe database +% +% genre_freqs: frequency of genre x in position y +% genre_childof: percentage of genre x being a successor of genre y +% +% reimports the text - based representation of magnatunes and tries to +% determine an underlying structure. 
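+%
+% illustrative call (hypothetical three-column clip_info cell array,
+% only column 3 -- the comma-separated genre string -- is used):
+%
+%   clip_info = { 1, 'clip_a', 'Classical, Baroque'; ...
+%                 2, 'clip_b', 'Classical' };
+%   [clip_g, g, g_freqs, g_childof] = get_magnagenre_numeric(clip_info);
+%
+%   % clip_g    : sparse clips x genres indicator matrix
+%   % g         : cell array of genre names, in order of first appearance
+%   % g_freqs   : how often each genre appears at list position 1..4
+%   % g_childof : share of a genre's occurrences following each
+%   %             first-listed (parent) genre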
+% + +data = clip_info_magnagenres(:,3); + +% --- +% genre and genre posfrequency list: +% --- + +magnagen_id = {}; + +genres = {}; + +max_genres = 50; +max_simul_genres = 4; +genre_freqs = []; +genre_childof = []; + +% for each of the genre tags +for i = 1:length(data) + + % separate genres; + tmp = explode(',', data{i}); + + % find and save correspnding genre indices + for j = 1:length(tmp) + genidx = strcellfind(genres,tmp(j)); + + % add genre to genre list if not existent + if genidx < 1 + genidx = size(genres, 1) + 1; + genres = cat(1, genres, tmp(j)); + + genre_freqs(genidx,:) = zeros(1, max_simul_genres); + genre_childof(genidx,:) = zeros(1, max_genres); + end + + % --- + % here, we save the index to a new genre structure + % --- + if j == 1 + magnagen_id{i} = genidx; + else + magnagen_id{i} = [magnagen_id{i}, genidx]; + end + % --- + % further genre statistics, perhaps its a hierarchy + % --- + + % save frequency by position + genre_freqs(genidx, j) = genre_freqs(genidx, j) + 1; + + % save parent genre if applicable + if j == 1 + + % remember parent index + paridx = genidx; + else + + % count index for this parent + genre_childof(genidx, paridx) = genre_childof(genidx, paridx) + 1; + end + end + +% --- +% - save preceeding first genre for this into another table +% --- +end + +% --- +% this should output quite generic data, to ease +% comparison with other genre hierarchies. +% +% thus, we set the parental relation relative to overall +% appearance of the child genre +% --- + +% remove overlapping columns +idx = find(sum(genre_childof,1) > 0,1, 'last'); +idx = max(size(genre_childof,1),idx); + +genre_childof = genre_childof(:, 1:idx); + +% make values relative to total occurrence of child +for i = 1: size(genre_childof,1) + genre_childof(i, :) = genre_childof(i, :) ./ sum(genre_freqs(i,:)); +end + +% --- +% reformat genre attribute table as sparse matrix +% --- +clip_magnagenres = sparse(length(magnagen_id),length(genres)); +for i = 1:length(magnagen_id) + clip_magnagenres(i,magnagen_id{i}) = 1; +end + +magnagenres = genres; +magnagenre_freqs = genre_freqs; +magnagenre_childof = genre_childof; diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/get_similar_clips.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/get_similar_clips.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,24 @@ +function [clips, score] = get_similar_clips(extid, sim, sim_ids) +% clips = get_similar_clips(id, sim, sim_id) +% +% searches for similar clips for clip with base_id id +% + +if nargin < 3 + warning 'no similarity id codebook given' + sim_ids = 1:size(sim,1); +end + +% get internal clip id +id = find(sim_ids == extid); + +if isempty(id) + error 'no similarity data available' +end + +clips = [find(sim(id,:) ~= 0)]; + +% get clip scores +score = sim(id,clips); + +clips = sim_ids(clips); diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/last.fm.mat Binary file core/magnatagatune/last.fm.mat has changed diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/macro_sim_graph_analysis.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/macro_sim_graph_analysis.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,129 @@ + +% Results: +Gcomp = ClipComparedGraph(comparison, comparison_ids) +[Gcompcomps] = Gcomp.all_connected_components(); +% only 2graphs with 7 nodes, all subtriplets just connect via +% one node -> no "infestation" of other triplets + +G = ClipSimGraph(comparison, comparison_ids) +[Gs,s, id] = G.all_strongly_connected_components(); +u = hist(s); +% 179 
cycled triplets in unprepared graph + +Gmd = ClipSimGraphMD(comparison, comparison_ids) +[Gsmd, smd, id] = Gmd.all_strongly_connected_components(); +Gsmd(1).order; +% Not a single triple cycle found + +% --- +% analysis on compliance with stobers preprocessing +% trying to find out the differing ? Graph structures +% for that sake analysis with connected (no cycles) components +% --- +Graw = ClipSimGraph(comparison, comparison_ids); +[Gsraw, sraw] = connected_components(Graw) +hist(sraw) +% in the unedited graph, there are 346 connected triplets + +Gm = ClipSimGraphMulti(comparison, comparison_ids); +[Gsraw2, sraw2] = connected_components(Gm) +u = hist(sraw2) +% in this unedited graph, there are still 346 connected triplets + +Gm.remove_cycles_length2; +[Gs, s, id] = connected_components(DiGraph(Gm)); +u = hist(s) +% 27 single nodes, 337 three-node graphs. why not two-nodes + +% which clips do these belong to? +soloid = find(s == 1); +soloclips = []; +for i = 1:numel(soloid) + tmpG = ClipSimGraph(Gs(soloid(i))); + + % get the clisp of the single node + [sm, bg] = tmpG.clipsN(tmpG.nodes); + soloclips = [soloclips sm bg]; +end +soloclips = unique(soloclips) +% exactly 27 clips lost + +% now with our graph processing +Gmd = ClipSimGraphMD(comparison, comparison_ids) +[Gsmd, smd] = connected_components(Gmd) +% same result, + +% --- +% now which are the orphaned nodes ? +% --- +Go = Gm.unconnected_subgraph(); +Go.label(Go.nodes) + + +% --- +% validation with comparison data +% --- + +num_compares = sparse(numel(comparison_ids), numel(comparison_ids)); +for i = 1:size(comparison,1) +abc = sort(comparison(i,1:3)); +a = abc(1); +b = abc(2); +c = abc(3); +num_compares(a,b) = num_compares(a,b) +1; +num_compares(b,c) = num_compares(b,c) +1; +num_compares(a,c) = num_compares(a,c) +1; +end + +[i,j] = ind2sub(find(num_compares > 2)) +[i,j] = ind2sub(size(num_compares), find(num_compares > 2)) +[i,j] = ind2sub(size(num_compares), find(num_compares > 1)), +i(1) +j(1) +find(comparison(:,1) == i(1) || comparison(:,2) == i(1) || comparison(:,3) == i(1)...) +&& comparison(:,1) == j(1) || comparison(:,2) == j(1) || comparison(:,3) == j(1)) +find((comparison(:,1) == i(1) | comparison(:,2) == i(1) | comparison(:,3) == i(1))...) +& (comparison(:,1) == j(1) | comparison(:,2) == j(1) | comparison(:,3) == j(1))) + +num = 2; +a = find((comparison(:,1) == i(num) | comparison(:,2) == i(num) | comparison(:,3) == i(num))...) +& (comparison(:,1) == j(num) | comparison(:,2) == j(num) | comparison(:,3) == j(num))) +comparison(a,:) +[i,j] = ind2sub(size(num_compares), find(num_compares > 1)) +num = 3; +a = find((comparison(:,1) == i(num) | comparison(:,2) == i(num) | comparison(:,3) == i(num))...) +& (comparison(:,1) == j(num) | comparison(:,2) == j(num) | comparison(:,3) == j(num))) +comparison(a,:) +[i,j] = ind2sub(size(num_compares), find(num_compares > 1)) +num = 18; +a = find((comparison(:,1) == i(num) | comparison(:,2) == i(num) | comparison(:,3) == i(num))...) +& (comparison(:,1) == j(num) | comparison(:,2) == j(num) | comparison(:,3) == j(num))) +comparison(a,:) +[i,j] = ind2sub(size(num_compares), find(num_compares > 1)) +num = 300; +a = find((comparison(:,1) == i(num) | comparison(:,2) == i(num) | comparison(:,3) == i(num))...) 
+& (comparison(:,1) == j(num) | comparison(:,2) == j(num) | comparison(:,3) == j(num))) +comparison(a,:) + +% --- +% other simulated data +% --- +ClipSimGraph([1 2 3 5 1 0;], 1:100).visualise +ClipSimGraph([1 2 3 5 1 0; 1 2 4 6 0 0], 1:100).visualise +ClipSimGraph([1 2 3 5 1 0; 1 2 4 6 0 0], 1:100).max_degree +G = ClipSimGraph(comparison, comparison_ids); + +% --- +% scc algorithm tests +% --- +V = ones(1,5) +E = zeros(5) +for i=1:4 +E(i+1,i) = 1 +end +E(1,4) = 1 +gtest = DiGRaph(V,E) + +gtest.strongly_connected_components(4) +gtest.strongly_connected_components(5) + diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/macro_validate_cycle_finder.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/macro_validate_cycle_finder.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,94 @@ + +% profile clear; +% profile on; + +clear eval; +N = 20; +for ci = 1:N + order = ci; + % connectivity = ceil(rand(1) * order) + % E = mk_rnd_dag(order, connectivity); + E = rand(order) > exp(1) * rand(1); + + G = DiGraph(ones(order,1), E); + eval.G(ci) = G; + + eval.res_naive(ci) = acyclic(E); + + eval.res_myfun(ci) = G.isAcyclic; + + Gstob = ClipSimGraphStober(G); + + eval.res_stober(ci) = Gstob.isAcyclic; +end +eval + + +figure +plot([eval.res_naive' eval.res_myfun'+0.1 eval.res_stober'+0.2],'*') +legend naive myfun stober +axis([0 N -1 2 ]) +% profile viewer; + +%% +% --- +% Comparison of implementations on magnatagatune dataset +% --- + +% Stober Graph +Gstob = ClipSimGraphStober(); + +% Cast into matlab Graph +GstobG = Gstob.to_DiGraph; + +% My Multigraph reimplementation +Gm = ClipSimGraphMulti(comparison, comparison_ids); + +GstobG == Gm % TRUE - the converted Graph exactly resembles my extracted one + +Gm.isAcyclic % FALSE +GstobG.isAcyclic % FALSE + +Gstob.isAcyclic % TRUE (this is wrong, there are a lot of length-2-cycles) + +% --- +% Ok, now remove length 2 cycles +% --- +Gm.remove_cycles_length2 % matlab cycle remover +Gstob.remove_cycles_length2 % stober cycle remover + +GstobGnocycles = Gstob.to_DiGraph(); + +GstobGnocycles == Gm % TRUE - we remove the same edges + +% --- +% NOTE: There are no cycles left +% after removing length 2 cycles +% --- +GstobGnocycles.isAcyclic % TRUE + +Gstob.isAcyclic % FALSE inconsistent wrong result(compare with above) + + +%% Finds the actual defective cycle + +Gstob = ClipSimGraphStober(); +Gstob.remove_cycles_length2 + +GstobG = Gstob.to_DiGraph(); +Gstob.isAcyclic + +N = GstobG.node('227:45936'); + +[Gs] = GstobG.connected_components(N); +Gs.visualise + +% --- +% Search same node in my Multigraph reimplementation +% --- +Gm = ClipSimGraphMulti(comparison, comparison_ids); +Gm.remove_cycles_length2 +Nm = Gm.node(227,45936); + +[Gsm] = Gm.connected_components(Nm); +Gsm.visualise \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/macro_validate_stober_randomsets.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/macro_validate_stober_randomsets.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,50 @@ + +nruns = 50; +weights = zeros(674, nruns); +for i = 1:50; + Gstob = ClipSimGraphStober(); + Gstob.random_all_constraints_graph; + G = ClipSimGraphMulti(Gstob.to_DiGraph()); + + % --- + % derive similarity information: this + % returns the weights of edges meaning sim(a,b) > sim(a,c) + % --- + [weights(:,i), a, b, c] = G.similarities(); +end + +% --- +% lets see if the siilarity data is the same +% it is NOT +% --- +diff = sum(sum(abs(diff(weights)))) + +% --- +% ok, my theory is that +% a. 
all the three-edge components are erased one edge. +% b. if not all, then the ones with two edges +% being directed at the same node. +% --- + +% --- +% have to convert, as for some reason +% this does not work yet on multigraphs +% --- +Gdi = DiGraph(G); +[Gs, s, id] = Gdi.connected_components(); + +for i = 1:numel(Gs) + edges(i) = Gs(i).num_edges(); +end + +% only 2-edge graphs +max(edges) + +% --- +% test if graphs are subgraphs of 860 full ne +% --- +Gm = ClipSimGraphMulti(comparison, comparison_ids); +Gm.remove_cycles_lenght2; + +Gm.isSubgraph(G) + diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/magnagenres_final.mat Binary file core/magnatagatune/magnagenres_final.mat has changed diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/magnatune_song_info.mat Binary file core/magnatagatune/magnatune_song_info.mat has changed diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/makro_ISMIR12_simdata.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/makro_ISMIR12_simdata.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,5 @@ +trainpart = logspace(-1.8, 0,20) + +[partBinTrn, partBinTst, partBinNoTrn] = sim_from_comparison_fair_components(comparison, comparison_ids, 10, trainpart); + +[partBinTrn, partBinTst, partBinNoTrn] = sim_from_comparison_UNfair_components(comparison, comparison_ids, 10, trainpart); \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/metric_fulfills_comparison.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/metric_fulfills_comparison.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +function out = metric_fulfills_comparison(dist, comparison, comparison_ids) \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/metric_fulfills_ranking.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/metric_fulfills_ranking.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,90 @@ +function [out, equal, singles, valid] = metric_fulfills_ranking(met, Y, clips) +% out = sim_fulfills_ranking(met, Y, ids) +% +% outputs (0-1) percentage of ranking constraints +% completely fulfilled by the given distance measure + +% --- +% for each valid query, check if the ranking is fulfilled +% we also count eeventswhere the distance is equal +% --- +valid = ~(cellfun(@isempty, Y(:,1)) | cellfun(@isempty, Y(:,2))); + +equal = 0; +singles = false(size(Y,1),1); +for i = 1:size(Y,1) + + if valid(i) + + if numel(Y{i,1}) == 1 && numel(Y{i,2}) == 1 + % --- + % Efficient singular ranking comparison + % using met.distance(clipa,clipb) + % --- + % NOTE: by using the < here, we loose half of the + % examples which have been same distance and + % randomly positioned in earlier runs :( + % --- + distgood = met.distance(clips(i), clips(Y{i,1})); + distbad = met.distance(clips(i), clips(Y{i,2})); + + singles(i) = distgood < distbad; + if distgood == distbad + equal = equal + 1; + end + else + % --- + % NOTE: Only reactivated for testing of + % outdated similarity data + % --- + + warning 'code only for outdated similarity data\n'; + + % --- + % NOTE: this code is analogous to the above + % --- + singles(i) = 1; + for j=1:numel(Y{i,1}) + for k = 1:numel(Y{i,2}) + + % --- + % All the entries in Y(i,1) have to come before + % the ones in Y(i,2) + % --- + distgood = met.distance(clips(i), clips(Y{i,1}(j))); + distbad = met.distance(clips(i), clips(Y{i,2}(k))); + singles(i) = singles(i) && (distgood < distbad); + + % count equalities + if distgood == distbad + equal = equal + 1; + end + if 
~singles(i) + break; + end + end + end + + + end + else + end +end + + +out(1) = mean(singles(valid)); +if size(Y,2) == 3 + % --- + % weighted sum of singles: count satisfied votes + % --- + weights = cell2mat( Y( valid, 3)); + out(2) = sum(weights(singles(valid))) / sum(weights); +else + out(2) = -1; +end +out = out'; + +% markup invalid rankings +% singles(~valid) = -1; + + diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/mlr_unittest.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/mlr_unittest.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,34 @@ +function mlr_unittest(X, Yrel) + +% Loss values to test + LOSS = {'AUC', 'Prec@k', 'MAP', 'MRR', 'NDCG'}; + + + % Regularization values to test + REG = [0,1,2,3]; + + % Batch sizes to test + BATCH = [0 1 5]; + + % Diagonal settings + DIAG = [0 1]; + + figure(1); + for l = 1:length(LOSS) + display(['Testing ', LOSS{l}]); + for r = 1:length(REG) + display(sprintf('\tREG=%d', REG(r))); + for b = 1:length(BATCH) + display(sprintf('\tB=%d', BATCH(b))); + for d = 1:length(DIAG) + display(sprintf('\tDiagonal=%d', DIAG(d))); + [W, Xi, D] = mlr_train(X, Yrel, 10e5, LOSS{l}, REG(r), DIAG(d), BATCH(b)); + imagesc(W); drawnow; +% [W, Xi, D] = mlr_train(X, Yclass, 10e5, LOSS{l}, REG(r), DIAG(d), BATCH(b)); +% imagesc(W); drawnow; + end + end + end + end + +end diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/play_comparison.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/play_comparison.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,11 @@ +function out = play_comparison(index, comparison, comparison_ids); +% out = play_comparison(index, comparison, comparison_ids); + +for i = 1:length(index) + + fprintf('\n \n-- comparison nr.%d ---\n', index(i)); + fprintf(' result: %d - %d - %d', comparison(index(i),4:6)); + + tmpclips = MTTClip(comparison_ids(comparison(index(i),1:3))); + tmpclips.play(); +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/ranking_from_comparison_ClipSimGraphMulti.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/ranking_from_comparison_ClipSimGraphMulti.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,98 @@ +function [r, valididx, clip_ids] = ranking_from_comparison_ClipSimGraphMulti(comparison, comparison_ids) +% returns rankings for compared songs. +% rankings are genrerated using a Multigraph analysis of the comparison +% data +% +% r(idx,1) = clip being similar to clip idx +% r(idx,2) = clip being less similar to clip idx +% r(idx,3) = weight of this ranking +% +% the indices in clipids correspond to the indices in comparison_ids + +% create similarity graph +% G = ClipSimGraphMulti(comparison, comparison_ids); +% G.remove_cycles_length2; +% save('comp_SimGraphMulti.mat', 'G'); +cprint(2, 'loading Multigraph for Similarity Constraints') +load('comp_SimGraphMulti.mat', 'G'); + + + +% inverse comparison index +inv_comparison = sparse(comparison_ids,1, 1:numel(comparison_ids)); + +% --- +% derive similarity information: this +% returns the weights of edges meaning sim(a,b) > sim(a,c) +% --- +[weights, a, b, c] = G.similarities(); +% --- +% NOTE: it is yet unclear which index +% (the comparison or the mtt clip index) +% is best to use +% +% The lines below transform the MTT ids from the graph into comparison ids +% --- +a = inv_comparison(a); +b = inv_comparison(b); +c = inv_comparison(c); + +% --- +% reformat into ranking: the first 1019 clips are ordered +% as in comparison_ids. 
then, clips mentioned more than once +% are attached. +% --- +[a, idx] = sort(a); +b = b(idx); +c = c(idx); + + +clip_ids = zeros(numel(a), 1); + +% --- +% NOTE: r may grow over its allocated size +% --- +r = cell(numel(a),1 ); + +% keep track of constraints which have been attached +visited = zeros(1, numel(a)); +for i = 1:numel(comparison_ids) + + % --- + % get first occurence of this index, + % and save the data into ranking + % --- + tmp_idx = find(a == i, 1, 'first'); + + clip_ids(i) = i; % == a(tmp_idx) + + if ~isempty(tmp_idx) + visited(tmp_idx) = true; + + r{i,1} = b(tmp_idx); + r{i,2} = c(tmp_idx); + r{i,3} = weights(tmp_idx); + + valididx(i) = true; + else + % invalid ranking + valididx(i) = false; + r{i,3} = 0; + end +end + +% --- +% Now we attach the remaining constraints +% --- +remaining = find(~visited); +clip_ids( numel(comparison_ids)+1 : numel(comparison_ids)+numel(remaining)) = a(remaining); + +for i = 1:numel(remaining) + r{ numel(comparison_ids)+i, 1} = b(remaining(i)); + r{ numel(comparison_ids)+i , 2} = c(remaining(i)); + r{ numel(comparison_ids)+i , 3} = weights(remaining(i)); +end + +valididx(numel(comparison_ids)+1 : ... + numel(comparison_ids)+numel(remaining)) = true; + diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/ranking_from_comparison_Stober_all_constraints.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/ranking_from_comparison_Stober_all_constraints.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,98 @@ +function [r, valididx, clip_ids] = ranking_from_comparison_Stober_all_constraints(comparison, comparison_ids) +% returns rankings for compared songs. +% rankings are genrerated using a Multigraph analysis of the comparison +% data +% +% r(idx,1) = clip being similar to clip idx +% r(idx,2) = clip being less similar to clip idx +% r(idx,3) = weight of this ranking +% +% the indices in clipids correspond to the indices in comparison_ids + +% create similarity graph + +cprint(2, 'Generating Stober Multigraph for Similarity Constraints') +% Gstob = ClipSimGraphStober(); +% Gstob.random_all_constraints_graph; +% G = ClipSimGraphMulti(Gstob.to_DiGraph()); +load('comp_SimGraphStob_ac1.mat', 'G'); + + +% inverse comparison index +inv_comparison = sparse(comparison_ids,1, 1:numel(comparison_ids)); + +% --- +% derive similarity information: this +% returns the weights of edges meaning sim(a,b) > sim(a,c) +% --- +[weights, a, b, c] = G.similarities(); +% --- +% NOTE: it is yet unclear which index +% (the comparison or the mtt clip index) +% is best to use +% +% The lines below transform the MTT ids from the graph into comparison ids +% --- +a = inv_comparison(a); +b = inv_comparison(b); +c = inv_comparison(c); + +% --- +% reformat into ranking: the first 1019 clips are ordered +% as in comparison_ids. then, clips mentioned more than once +% are attached. 
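+%
+% e.g. (hypothetical indices): if clip 5 appears as query in two
+% constraints, row 5 of r holds the first one as {b1, c1, w1} and a
+% second row {b2, c2, w2} is appended at the end, with clip_ids of
+% that extra row set to 5; clips without any constraint keep an
+% empty row and are flagged invalid in valididx.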
+% --- +[a, idx] = sort(a); +b = b(idx); +c = c(idx); + + +clip_ids = zeros(numel(a), 1); + +% --- +% NOTE: r may grow over its allocated size +% --- +r = cell(numel(a),1 ); + +% keep track of constraints which have been attached +visited = zeros(1, numel(a)); +for i = 1:numel(comparison_ids) + + % --- + % get first occurence of this index, + % and save the data into ranking + % --- + tmp_idx = find(a == i, 1, 'first'); + + clip_ids(i) = i; % == a(tmp_idx) + + if ~isempty(tmp_idx) + visited(tmp_idx) = true; + + r{i,1} = b(tmp_idx); + r{i,2} = c(tmp_idx); + r{i,3} = weights(tmp_idx); + + valididx(i) = true; + else + % invalid ranking + valididx(i) = false; + r{i,3} = 0; + end +end + +% --- +% Now we attach the remaining constraints +% --- +remaining = find(~visited); +clip_ids( numel(comparison_ids)+1 : numel(comparison_ids)+numel(remaining)) = a(remaining); + +for i = 1:numel(remaining) + r{ numel(comparison_ids)+i, 1} = b(remaining(i)); + r{ numel(comparison_ids)+i , 2} = c(remaining(i)); + r{ numel(comparison_ids)+i , 3} = weights(remaining(i)); +end + +valididx(numel(comparison_ids)+1 : ... + numel(comparison_ids)+numel(remaining)) = true; + diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/ranking_from_comparison_Stober_diffs.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/ranking_from_comparison_Stober_diffs.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,69 @@ +function [r, valididx, clip_ids, P] = ranking_from_comparison_Stober_diffs(nDatasets, nRuns) + +global globalvars; +global comparison; +global comparison_ids; + +% --- +% get the standard test set +% NOTE: we will keep the r and valididx, and just use +% the rest of the data for determining the issing edges etc +% --- +load('comp_SimGraphMulti_SimDataDump', 'G', 'r', 'valididx', 'clip_ids'); +nData = sum(valididx > 0); + +% --- +% initialise the new partition +% --- +P = cvpartition_alltrain(nData, nDatasets * nRuns); + + +for i = 1:nDatasets + + % --- + % create new Stober graph + % --- + Gstob = ClipSimGraphStober(); + Gstob.random_all_constraints_graph; + Gdiff = G - ClipSimGraphMulti(Gstob.to_DiGraph()); + + % --- + % get missing entries in stober similarity + % --- + idx = find_graph_in_valid(r, comparison_ids(clip_ids),valididx, Gdiff); + lostData = sum(idx); + if lostData ~= Gdiff.num_edges + error ('Inconsistent Data: edges in gdiff could not be found'); + end + + % --- + % save entries in DataPartition (remove from valid sets) + % NOTE: this is manhual intervention into Class territory :( + % --- + for j = 1:nRuns + jd = j + (i-1) * nRuns; + P.mtraining{jd} = ones(P.N, 1) - idx(valididx)'; + P.mtest{jd} = ones(P.N, 1) - idx(valididx)'; + P.TrainSize(jd) = P.N - lostData; + P.TestSize(jd) = P.N - lostData; + end +end +end + +% --- +% This finds all graphs edges in the ranking +% --- +function idx = find_graph_in_valid(r, clip_ids, valididx, G) + + idx = zeros(1,size(r,1)); + for i = find(valididx) + + a = clip_ids(i); + b = clip_ids(r{i,1}); + c = clip_ids(r{i,2}); + if G.edge(a,b,c) > 0 + idx(i) = 1; + end + end +end + diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/ranking_from_comparison_trivial.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/ranking_from_comparison_trivial.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,90 @@ +function [r, valididx, clip_ids] = ranking_from_comparison_trivial(comparison, comparison_ids) +% returns a ranking for each of the compared songs +% +% r(idx,1) = all indexes which are similar to clip idx +% r(idx,2) = all indexes which 
are dissimilar to clip idx +% +% if onlyvalid is set to true, only the transitive hull of +% clips having sim and dissim values will be output + +% --- +% in comparison, the outlying piece is highlighted. +% thus, we naively consider that +% a. both of the remaining pieces are more similar to each other. +% b. the outlier is dissimilar to both of the other pieces +% --- + +[outsort, outidx] = sort(comparison(:,4:6),2,'ascend'); + +r = cell(numel(comparison_ids), 2); +s = zeros(numel(comparison_ids), 2); % score for rankings +for i = 1:size(comparison, 1) + + % get the three relevant comparison ids + + pos = comparison(i, outidx(i,1:3)); + vals = outsort(i,:); + + % --- + % each of the three clips will be evaluated based on its position + % the last clip is the potential outlyier + % --- + + % --- + % usual case: two small values and a last outlie + % --- + if vals(2) ~= vals(3) + + + % --- + % first (most common) clip: + % the second clip is more similar than the third one + % --- + r{pos(1),1} = cat(1, r{pos(1),1}, pos(2)); % similar + r{pos(1),2} = cat(1, r{pos(1),2}, pos(3)); % dissimilar +% else +% +% % ok, both seem more dissimilar to clip 1 +% r{pos(1),2} = cat(1, r{pos(1),1}, pos(2)); % both dissimilar +% r{pos(1),2} = cat (1, r{pos(1),2}, pos(3)); % dissimilar + end + + if vals(3) ~= vals(1) + % --- + % the second clip is more similar to the first than + % the third + % --- + r{pos(2),1} = cat(1, r{pos(2),1}, pos(1)); + r{pos(2),2} = cat(1, r{pos(2),2}, pos(3)); + + % --- + % the third clip is not similar to any of the former + % NOTE: we avoid this information as it possibly leads + % to some contradictionary statements + % --- + r{pos(3),2} = cat(1, r{pos(3),2}, pos(1)); + r{pos(3),2} = cat(1, r{pos(3),2}, pos(2)); + end + + +end + +% --- +% we cleanup r and +% run basic checks on emptiness and paradox assumptions +% --- +valididx = zeros(size(r,1), 1); +for i = 1:size(r, 1) + + % make unique + r{i,1} = unique(r{i,1}); + r{i,2} = unique(r{i,2}); + + % check + valididx(i) = ~isempty(r{i,1}) && ~isempty(r{i,2}) && ... 
+ isempty(intersect(r{i,1}, r{i,2})); +end + +valididx = logical(valididx); +clip_ids = 1:numel(comparison_ids); + diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/sim_from_comparison_UNfair_components.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/sim_from_comparison_UNfair_components.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,91 @@ +function [partBinTrn, partBinTst, partBinNoTrn] = sim_from_comparison_UNfair_components(comparison, comparison_ids, k, trainpart, filename) +% +% FOR ISMIR 2012 +% +% creates a cross-validation partitioning of the +% similarity data in "multiG", NOT PRESERVING the +% connected components in it during partitioning + +% --- +% get the similarity multigraph and remove cycles +% --- +cprint(2, 'creating graph') +% Gm = ClipSimGraphMulti(comparison, comparison_ids); +% Gm.remove_cycles_length2; +cprint(2, 'loading Multigraph for Similarity Constraints') +load('comp_SimGraphMulti.mat', 'G'); + + +% get similarity data +[weights, a, b, c] = G.similarities(); + +% --- +% We randomise the constraint succession +% --- +datPermu = randperm(numel(a)); +a = a(datPermu); +b = b(datPermu); +c = c(datPermu); +weights = weights(datPermu); + +% --- +% NOTE: we try the easy route: partition the graphs +% and look at which constraints balance we end up with +% --- +P = cvpartition(numel(a), 'k', k); + +% --- +% here we export similarity test sets +% --- +cprint(2, 'export test similarity') +partBinTst = {}; +for i = 1:P.NumTestSets % test runs + + partBinTst{i} = [a(P.test(i))' b(P.test(i))' c(P.test(i))' weights(P.test(i))]; +end + + +% --- +% Note: This uses a "truly" increasing training set +% to do the partial training partition +% --- +cprint(2, 'export train similarity') +for m = 1:numel(trainpart) + + % save test indices + Ptrain(m) = cvpartition_trunctrain_incsubsets(P, trainpart(m)); +end + +% --- +% here we export similarity training sets +% --- +partBinTrn = {}; +for i = 1:P.NumTestSets % train runs + + for m = 1:numel(trainpart) % increasing training sets + + % get training indices + idxB = Ptrain(m).training(i); + + % save into cell + partBinTrn{i,m} = [a(idxB)' b(idxB)' c(idxB)' weights(idxB)]; + end +end + +partBinNoTrn = {}; +for i = 1:P.NumTestSets % train runs + + for m = 1:numel(trainpart) % increasing training sets + + % get training indices + idxB = ~Ptrain(m).training(i) & ~Ptrain(m).test(i); + + % save into cell + partBinNoTrn{i,m} = [a(idxB)' b(idxB)' c(idxB)' weights(idxB)]; + end +end + +if nargin == 5 + save(filename, 'partBinTrn', 'partBinTst', 'partBinNoTrn') +end +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/sim_from_comparison_attach_weights.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/sim_from_comparison_attach_weights.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,97 @@ +function sim_from_comparison_attach_weights(filen) + +global comparison; +global comparison_ids; +% load data from mat file +load(filen); + +% --- +% get the similarity multigraph and remove cycles +% --- +cprint(2, 'creating graph') +Gm = ClipSimGraphMulti(comparison, comparison_ids); +Gm.remove_cycles_length2; + +nTestSets = size(partBinTst, 2); % num cv bins +ntrainsizes = size(partBinTrn, 2); % num increases of training + +% --- +% test sets +% --- +cprint(2, 'doing test sets') + +% for all validations +for k = 1:size(partBinTst, 2); + + % for each constraint tiplet + for i = 1:size(partBinTst{k},1) + + % get weight for this edge from graph + [weight, ~, ~] = 
Gm.edge(partBinTst{k}(i,1), partBinTst{k}(i,2),... + partBinTst{k}(i,3)); + + if isempty(weight) || weight == 0 + error 'inconsistent similarity data'; + end + + % save into same data structure + partBinTst{k}(i,4) = weight; + end +end + +% --- +% train sets +% --- + +cprint(2, 'doing train sets') +% for all validations +for k = 1:size(partBinTrn, 1); + + % for all training sizes + for m = 1:size(partBinTrn, 2); + + % for each constraint tiplet + for i = 1:size(partBinTrn{k,m},1) + + % get weight for this edge from graph + [weight, ~, ~] = Gm.edge(partBinTrn{k,m}(i,1), partBinTrn{k,m}(i,2),... + partBinTrn{k,m}(i,3)); + + if isempty(weight) || weight == 0 + error 'inconsistent similarity data'; + end + + % save into same data structure + partBinTrn{k,m}(i,4) = weight; + end + end +end + +% --- +% Notrain sets +% --- +cprint(2, 'doing Notrain sets') +% for all validations +for k = 1:size(partBinNoTrn, 1); + + % for all training sizes + for m = 1:size(partBinNoTrn, 2); + + % for each constraint tiplet + for i = 1:size(partBinNoTrn{k,m},1) + + % get weight for this edge from graph + [weight, ~, ~] = Gm.edge(partBinNoTrn{k,m}(i,1), partBinNoTrn{k,m}(i,2),... + partBinNoTrn{k,m}(i,3)); + + if isempty(weight) || weight == 0 + error 'inconsistent similarity data'; + end + % save into same data structure + partBinNoTrn{k,m}(i,4) = weight; + end + end +end + +% save results to mat file +save(sprintf('%s_weighted.mat',filen), 'partBinTst', 'partBinTrn', 'partBinNoTrn'); diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/sim_from_comparison_fair_components.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/sim_from_comparison_fair_components.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,156 @@ +function [partBinTrn, partBinTst, partBinNoTrn] = sim_from_comparison_fair_components(comparison, comparison_ids, k, trainpart, filename) +% +% [partBinTrn, partBinTst, partBinNoTrn] = +% sim_from_comparison_fair_components(comparison, comparison_ids, k, trainpart, [filename]) + +% creates a cross-validation partitioning of the +% similarity data in "multiG", PRESERVING the +% connected components in it during partitioning + +% --- +% get the similarity multigraph and remove cycles +% --- +cprint(2, 'creating graph') +% Gm = ClipSimGraphMulti(comparison, comparison_ids); +% Gm.remove_cycles_length2; +cprint(2, 'loading Multigraph for Similarity Constraints') +load('comp_SimGraphMulti.mat', 'G'); + +% --- +% Note: we get the connected components in the graph +% and filter out those who have only one node +% --- +cprint(2, 'extracting connected components') +[Gs, s, id] = connected_components(G); + +valid = find(s > 1); +Gsv = Gs(valid); + +% --- +% We randomise the graph triplet order, +% as well as the in-component +% constraint succession be randomised here. +% --- +datPermu = randperm(numel(Gsv)); +Gsv = Gsv(datPermu); + +conPermu = zeros(numel(Gsv),3); +for i = 1:numel(Gsv) + conPermu(i,:) = randperm(3); +end + +% --- +% NOTE: we try the easy route: partition the graphs +% and look at which constraints balance we end up with +% --- +P = cvpartition(numel(Gsv), 'k', k); + +% --- +% here we export the graphs similarity test sets +% --- +cprint(2, 'export test similarity') +partBinTst = {}; +for i = 1:P.NumTestSets % test runs + partBinTst{i} = zeros(0, 3); + + tmp_idx = find(P.test(i)); + for j = 1:numel(tmp_idx); % componens + + % --- + % get the graphs which are associated + % to this set and save them into a new bin. 
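+        %
+        % each appended row has the form [a b c weight]: clip a was
+        % rated more similar to clip b than to clip c, with the
+        % corresponding multigraph edge weight attached.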
+ % --- + [weights, a, b, c] = Gsv(tmp_idx(j)).similarities(); + partBinTst{i} = [partBinTst{i}; [a' b' c' weights]]; + end +end + + +% --- +% Note: This uses a "truly" increasing training set +% to do the partial training partition +% --- +cprint(2, 'export train similarity') +for m = 1:numel(trainpart) + + Ptrain(m) = cvpartition_trunctrain_incsubsets(P, trainpart(m)); +end + +% --- +% here we export the graph's similarity training sets +% --- +partBinTrn = {}; +for i = 1:P.NumTestSets % train runs + + for m = 1:numel(trainpart) % increasing training sets + partBinTrn{i,m} = zeros(0, 3); + + tmp_idx = find(Ptrain(m).training(i)); + for j = 1:numel(tmp_idx); % components + + % --- + % get the graphs which are associated + % to this set and save them into a new bin. + % --- + [weights, a, b, c] = Gsv(tmp_idx(j)).similarities(); + + % --- + % NOTE: WE apply the inner-triplet permutation, + % and truncate it where necessary + % --- + tmp_permu = conPermu(tmp_idx(j),:); + if numel(a) < 3 + tmp_permu = tmp_permu(tmp_permu <= numel(a)); + end + + a = a(tmp_permu); + b = b(tmp_permu); + c = c(tmp_permu); + weights = weights(tmp_permu); + + % save the clips + partBinTrn{i,m} = [partBinTrn{i,m}; [a' b' c' weights]]; + end + end +end + +partBinNoTrn = {}; +for i = 1:P.NumTestSets % train runs + + for m = 1:numel(trainpart) % increasing training sets + partBinNoTrn{i,m} = zeros(0, 3); + + tmp_idx = find(~Ptrain(m).training(i) & ~Ptrain(m).test(i)); + for j = 1:numel(tmp_idx); % components + + % --- + % get the graphs which are associated + % to this set and save them into a new bin. + % --- + [weights, a, b, c] = Gsv(tmp_idx(j)).similarities(); + + % --- + % NOTE: WE apply the inner-triplet permutation, + % and truncate it where necessary + % --- + tmp_permu = conPermu(tmp_idx(j),:); + if numel(a) < 3 + tmp_permu = tmp_permu(tmp_permu <= numel(a)); + end + + a = a(tmp_permu); + b = b(tmp_permu); + c = c(tmp_permu); + weights = weights(tmp_permu); + + % save the clips + partBinNoTrn{i,m} = [partBinNoTrn{i,m}; [a' b' c' weights]]; + end + end +end + +if nargin == 5 + save(filename, 'partBinTrn', 'partBinTst', 'partBinNoTrn') +end + +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/sim_from_comparison_naive.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/sim_from_comparison_naive.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,73 @@ +function [sim, dissim, confidence] = sim_from_comparison_naive(comparison, comparison_ids, symmetrical) +% +% [sim, dissim, confidence] = sim_from_comparison_naive(comparison) +% +% derives symmetric, absolute similarity measurements +% from relative magnatagatune comparisons +% naive implementation for first tests of the ITML algorithm +% + +% reindex comparison for more simple evaluation +% makro_prepare_comparison + +% --- +% analyse the number of comparisons for each pair of songs +% --- +[num_compares] = get_comparison_stats(comparison, comparison_ids); + +% --- +% in comparison, the outlying piece is highlighted. +% thus, we naively consider that +% a. both of the remaining pieces are more similar to each other. +% b. 
the outlier is dissimilar to both of the other pieces +% --- +[outsort, outidx] = sort(comparison(:,4:6),2,'ascend'); + +% --- +% similarity of the two non-outliers a, b +% they are similar if both of them have scores way smaller +% than the outlier c: +% score (a,b) = 1 - (max(a,b)/c) +% +% dissimilarity: clip b is considered more different to clip c than +% a, as clip a seems to share some properties with both songs +% dissim(b,c) = 0.5 + b/(2c) +% --- + +sim = sparse(numel(comparison_ids),numel(comparison_ids)); +dissim = sparse(numel(comparison_ids),numel(comparison_ids)); +for i = 1:size(comparison,1) + + % get the outlier votes + simpair = comparison(i,outidx(i,1:2)); + c = comparison(i,outidx(i,3)); + + % we want a triangular similarity matrix + [simpair, simidx] = sort(simpair); + outsort(i,1:2) = outsort(i,simidx); + + % --- + % save the distance between the second biggest vote and the max vote. + % NOTE: we bias the vote by dividing through the number of total + % comparisons for the particular pair of clips + % --- + sim(simpair(1), simpair(2)) = sim(simpair(1), simpair(2)) + ... + (1 - outsort(i,2) / outsort(i,3)) * (1 / num_compares(simpair(1),simpair(2))); + + dissim(simpair(1:2), c) = 0.5 + (outsort(i,1:2) ./ (2 * outsort(i,3))); +end + +% --- +% mirror to make matrix symmetrical +% --- +if nargin == 3 && symmetrical + sim = sim + sim'; + dissim = dissim + dissim'; +end + +% --- +% TODO: use number of votes and std or similar to +% rate the confidence for each similarity mesurement +% --- +confidence = []; + diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/tag_stats.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/tag_stats.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,31 @@ +function out = tag_stats(annots, names, max_plot) +% out = tag_stats(annots, names, max_plot) +% +% calculates statistics for tag distributions +% and plots them + +if nargin < 3 + max_plot = 25; +end + +% get overall frequency and sort accordingly +allapp = sum(annots > 0, 1); +% allapp = allapp/max(allapp); + +[null, idx] = sort(allapp,'descend'); + +% --- +% visualize only the fist top 200 +% --- +max_plot = min(numel(names), max_plot); + +figure; +bar(1:max_plot,allapp(idx(1:max_plot))) +set(gca,'XTick',1:max_plot); +set(gca,'XTickLabel',names(idx(1:max_plot))); +axis([1 max_plot 0 max(allapp)]) + +legend('#appearances'); +title 'tag statistics sorted by frequency of appearances' + +out = []; \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/tests_evals/do_test_rounds.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/tests_evals/do_test_rounds.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,211 @@ +function [out]= do_test_rounds(trainfun, X, simdata, trainparams, fparams,... + paramhash, paramhash_train, clips) + +% --- +% DEBUG: we mix up the training set +% --- +% simdata = mixup(simdata); + +nTestSets = size(simdata.partBinTst, 2); % num cv bins +ntrainsizes = size(simdata.partBinTrn, 2); % num increases of training + +for m = 1:ntrainsizes + + ok_train = zeros(2, nTestSets); + ok_test = zeros(2, nTestSets); + equal_test = zeros(1, nTestSets); + ok_notin_train = zeros(2, nTestSets); + + % parfor + for k = 1:nTestSets + + + % runlog mlr + try + + % --- + % Get the training constraints and features for this round + % --- + % DEBUG: the similarity data in Ytrain and Ytest seems correct. + [clips_train{k}, Xtrain, Ytrain{k}] ... 
+ = get_data_compact(clips, X, simdata.partBinTrn{k,m}); + Ytest{k}={}; + + % training step + [A{k}, dout{k}] = feval(trainfun, Xtrain, Ytrain{k}, trainparams); + + % --- + % test step + % TODO: the distmeasure object could be created by the wrapper! + % --- + if isnumeric(A{k}) + % mahalanobis case + diss = DistMeasureMahal(clips, A{k}, X); + else + % neural network case: A{k} is a neural net object + diss = DistMeasureNNet(clips, A{k}, X); + end + + % test training data + [ok_train(:,k)] = metric_fulfills_ranking... + (diss, Ytrain{k}, MTTClip(clips_train{k})); + + % get test data + [clips_test{k}, Xtest, Ytest{k}] ... + = get_data_compact(clips, X, simdata.partBinTst{k}); + + % diss = DistMeasureMahal(MTTClip(clips_test{k}), A{k}, Xtest); + % test test data + [ok_test(:,k), equal_test] = metric_fulfills_ranking... + (diss, Ytest{k}, MTTClip(clips_test{k})); + cprint(3,'%2.2f validation error', 1-ok_test(1,k)); + + % --- + % this gives data for the unused training set remainders + % --- + if isfield(simdata,'partBinNoTrn') + if ~isempty(simdata.partBinNoTrn{k,m}) + [clips_notin_train{k}, X_notin_train, Y_notin_train{k}] ... + = get_data_compact(clips, X, simdata.partBinNoTrn{k,m}); + + % test unused training data + [ok_notin_train(:,k), equal_test] = metric_fulfills_ranking... + (diss, Y_notin_train{k}, MTTClip(clips_notin_train{k})); + + % what to do if there is no data ? + else + ok_notin_train(:,k) = -1; + end + else + ok_notin_train(:,k) = -1; + end + + catch err + + % --- + % in case training or test fails + % --- + print_error(err); + + A{k} = []; + dout{k} = -1; + + ok_test(:,k) = -1; + ok_train(:,k) = -1; + ok_notin_train(:,k) = -1; + equal_test(k) = -1; + + % --- + % save feature, system and data configuration + % and indicate failure + % --- + xml_save(sprintf('runlog_%s.%s_trainparam.xml',... + paramhash, paramhash_train), trainparams); + xml_save(sprintf('runlog_%s.%s_err.xml',... + paramhash, paramhash_train), print_error(err)); + end + end + + if ~(ntrainsizes == 1) + + % save elaborate testing data + size_sum = 0; + for i = 1:nTestSets + size_sum = size_sum + size(simdata.partBinTrn{i,m}) / size(simdata.partBinTrn{i,end}); + end + size_sum = size_sum / nTestSets; + + out.inctrain.trainfrac(:, m) = size_sum; + out.inctrain.dataPartition(:, m) = 0; + + % --- + % NOTE: the max value is important for debugging, + % especially when the maximal training success is reached + % in the middle of the data set + % --- +% out.inctrain.max_ok_test(:, m) = max(ok_test, 2); + out.inctrain.mean_ok_test(:, m) = mean(ok_test(:, ok_test(1,:) >=0), 2); + out.inctrain.var_ok_test(:, m) = var(ok_test(:, ok_test(1,:) >=0), 0, 2); + out.inctrain.equal_test(m) = median(equal_test); + + out.inctrain.mean_ok_train(:, m) = mean(ok_train(:, ok_train(1,:) >=0), 2); + out.inctrain.var_ok_train(:, m) = var(ok_train(:, ok_train(1,:) >=0), 0, 2); + + % --- + % TODO: DEBUG: this does not work correctly + % maybe thats also true for the above? 
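+        %
+        % note: folds that failed, or that had no left-over training
+        % data, carry the sentinel value -1 in ok_notin_train; the
+        % mean/var below therefore only average over columns with
+        % ok_notin_train(1,:) >= 0.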
+ % --- + out.inctrain.mean_ok_notin_train(:, m) = mean(ok_notin_train(:, ok_notin_train(1,:) >=0), 2); + out.inctrain.var_ok_notin_train(:, m) = var(ok_notin_train(:, ok_notin_train(1,:) >=0), 0, 2); + + diag.inctrain(m).ok_train = ok_train; + diag.inctrain(m).ok_test = ok_test; + diag.inctrain(m).ok_notin_train = ok_notin_train; + diag.inctrain(m).equal_test = equal_test; + end + + % --- + % save traditional information for full training set + % --- + if size(simdata.partBinTrn{1,m}) == size(simdata.partBinTrn{1,end}); + +% out.max_ok_test = max(ok_test, 2); + out.mean_ok_test = mean(ok_test(:, ok_test(1,:) >=0), 2); + out.var_ok_test = var(ok_test(:, ok_test(1,:) >=0), 0, 2); + out.equal_test = median(equal_test); + + out.mean_ok_train = mean(ok_train(:, ok_train(1,:) >=0), 2); + out.var_ok_train = var(ok_train(:, ok_train(1,:) >=0), 0, 2); + + % --- + % TODO: DEBUG: this does not work correctly + % --- + out.mean_ok_notin_train = mean(ok_notin_train(:, ok_notin_train(1,:) >=0), 2); + out.var_ok_notin_train = var(ok_notin_train(:, ok_notin_train(1,:) >=0), 0, 2); + + % --- + % get winning measure + % we use the weighted winning measure if possible + % --- + if max(ok_test(2,:)) > 0 + [~, best] = max(ok_test(2,:)); + else + [~, best] = max(ok_test(1,:)); + end + + diag.A = A; + diag.diag = dout; + + diag.ok_test = ok_test; + diag.equal_test = equal_test; + diag.ok_train = ok_train; + diag.ok_notin_train = ok_notin_train; + + % save some metric matrices + out.best_A = A{best}; + out.best_diag = dout{best}; + out.best_idx = best; + end +end + +% save parameters +out.camirrev = camirversion(); +out.fparams = fparams; +out.trainfun = trainfun; +out.trainparams = trainparams; +out.clip_ids = clips.id(); +out.dataPartition = []; +out.Y = size(simdata); +% --- +% NOTE: this takes A LOT OF DISC SPACE +% --- +% out.Ytrain = Ytrain{end}; +% out.Ytest = Ytest{end}; + +% --- +% save the diagostics data to disk +% --- +save(sprintf('runlog_%s.%s_results.mat',... + paramhash, paramhash_train),... 
+ 'out', 'diag'); +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/tests_evals/euclidean_wrapper.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/tests_evals/euclidean_wrapper.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,10 @@ +function [A, diag] = euclidean_wrapper(X, Y, trainparams) +% dummy wrapper to plug the euclidean distance measure into the +% testing set + +% --- +% call main training function +% --- +A = eye(size(X,1)); + +diag.trainfun = 'euclidean'; \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/tests_evals/get_data_compact.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/tests_evals/get_data_compact.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,93 @@ +function [clip_dict, X, Y, valididx] = get_data_compact(clips, featureVec, simdata) + % --- + % instead of creating a new three feature vectors + % for each constraint, this function links back to the + % FIRST occurrence of a distinct clip and its features + % + % TODO: try to always link ALL occurences of a feature + % / clip in the rankings + % --- + + % --- + % build the clip dictionary + % --- + uniclip = unique(reshape( simdata, 1, [])); + last_clip_pos = zeros(1, numel(uniclip)); + + clip_dict = []; + Y = {}; + idx_newclip = 1; + valididx = logical([]); + + % cycle all constraints of one cv bin + for j = 1:size(simdata,1) + + + % save the first clip + constraint = simdata(j,:); + + % --- + % check where the clip is and if there + % is space to write cosntraint data + % + % If not, a position is created + a = find_clip(constraint(1), 1); + b = find_clip(constraint(2), 0); + c = find_clip(constraint(3), 0); + + % [a,b,c] -> clip a more similar to clip b than to clip c + + % adress weightings + if numel(constraint) == 4 + Y(a,:) = {b, c, (constraint(4))}; + else + Y(a,:) = {b, c}; + end + end + + % --- + % get feature data + % NOTE: this uses an inverse index of the comparison clips + % --- + X = featureVec(:, clips(1).my_db.comparison_ids(clip_dict)); + + for i = find(~valididx) + if size(Y,2) == 3 + Y(i,:) = {[], [], 0}; + else + Y(i,:) = {[], []}; + end + end + + function clip_idx = find_clip(clip, write) + uc_idx = find(uniclip == clip); + + % --- + % do we have a place of this clip? + % + % NOTE: last_clip_pos has to be reset after every write + % --- + if (last_clip_pos(uc_idx) == 0) || ... + write && (numel(valididx) >= last_clip_pos(uc_idx)) &&... + (valididx(last_clip_pos(uc_idx)) == 1) + + % if not, create one + clip_idx = idx_newclip; + clip_dict(clip_idx) = clip; + valididx(clip_idx) = false; + + % and save into last pos + last_clip_pos(uc_idx) = clip_idx; + + idx_newclip = idx_newclip + 1; + else + clip_idx = last_clip_pos(uc_idx); + end + + if write + valididx(clip_idx) = true; + end + + end + +end diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/tests_evals/mlr/mlr_diag_fullstobgenrefeatures_fullsim_ISMIR12.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/tests_evals/mlr/mlr_diag_fullstobgenrefeatures_fullsim_ISMIR12.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,48 @@ +% --- +% vary feature parameters of mixed features +% --- +% +ftype = 'MTTMixedFeatureStober11Genre'; + +fparams_all = struct(... + ... % --- + ... % these are Stober11 parameters + ... % --- + 'stob_lowaudio', 1, ... + 'stob_highaudio', 1, ... % + 'stob_tags', 1, ... + 'stob_norm', 1, ... + ... % --- + ... % following are GenreBasic parameters + ... % --- + 'pct_genres', 1, ... 
% 1/100 percentile genre tags used + 'empty_genres', 0 ... % allow empty genres to persist + ); + + + +% --- +% vary parameters for mlr +% --- +trainparams_all = struct(... + 'C', [10^14], ... % best: 10^10: 68,84 + 'REG',[1], ... + 'LOSS',{{'AUC'}}, ... + 'k', 0, ... + 'Diagonal', [1],... + 'weighted', [0], ... + 'B', 0, ... + 'ConstrClock', [20], ... %def: 20 + 'E', [1e-3], ... % def: 1e-3 + 'dataset',{{'comp_partBinData_ISMIR12_01'}} ... + ); + +% set training function +trainfun = @mlr_wrapper; + +akt_dir = migrate_to_test_dir(); + +% call eval : changed to generic +out = test_generic_features_parameters_crossval... + (fparams_all, trainparams_all, trainfun, ftype); + diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/tests_evals/mlr/mlr_diag_fullstobgenrefeatures_nonclusteredsim_ISMIR12.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/tests_evals/mlr/mlr_diag_fullstobgenrefeatures_nonclusteredsim_ISMIR12.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,47 @@ +% --- +% vary feature parameters of mixed features +% --- +% +ftype = 'MTTMixedFeatureStober11Genre'; + +fparams_all = struct(... + ... % --- + ... % these are Stober11 parameters + ... % --- + 'stob_lowaudio', 1, ... + 'stob_highaudio', 1, ... % + 'stob_tags', 1, ... + 'stob_norm', 1, ... + ... % --- + ... % following are GenreBasic parameters + ... % --- + 'pct_genres', 1, ... % 1/100 percentile genre tags used + 'empty_genres', 0 ... % allow empty genres to persist + ); + + + +% --- +% vary parameters for mlr +% --- +trainparams_all = struct(... + 'C', [10^14], ... + 'REG',[1], ... + 'LOSS',{{'AUC'}}, ... + 'k', 0, ... + 'Diagonal', [1],... + 'weighted', [0], ... + 'B', 0, ... + 'ConstrClock', [20], ... %def: 20 + 'E', [1e-3], ... % def: 1e-3 + 'dataset',{{'comp_partBinData_unclustered_ISMIR12_01'}} ... + ); + +% set training function +trainfun = @mlr_wrapper; + +akt_dir = migrate_to_test_dir(); + +% call eval : changed to generic +out = test_generic_features_parameters_crossval... + (fparams_all, trainparams_all, trainfun, ftype); diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/tests_evals/mlr/mlr_fullstobgenrefeatures_fullsim_ISMIR12.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/tests_evals/mlr/mlr_fullstobgenrefeatures_fullsim_ISMIR12.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,47 @@ +% --- +% vary feature parameters of mixed features +% --- +% +ftype = 'MTTMixedFeatureStober11Genre'; + +fparams_all = struct(... + ... % --- + ... % these are Stober11 parameters + ... % --- + 'stob_lowaudio', 1, ... + 'stob_highaudio', 1, ... % + 'stob_tags', 1, ... + 'stob_norm', 1, ... + ... % --- + ... % following are GenreBasic parameters + ... % --- + 'pct_genres', 1, ... % 1/100 percentile genre tags used + 'empty_genres', 0 ... % allow empty genres to persist + ); + + + +% --- +% vary parameters for mlr +% --- +trainparams_all = struct(... + 'C', [10^12], ... % best: 10^5: 68,20 10^10:67,1 + 'REG',[1], ... + 'LOSS',{{'AUC'}}, ... + 'k', 0, ... + 'Diagonal', [0],... + 'weighted', [0], ... + 'B', 0, ... + 'ConstrClock', [20], ... %def: 20 + 'E', [1e-3], ... % def: 1e-3 + 'dataset',{{'comp_partBinData_ISMIR12_01'}} ... + ); + +% set training function +trainfun = @mlr_wrapper; + +akt_dir = migrate_to_test_dir(); + +% call eval : changed to generic +out = test_generic_features_parameters_crossval... 
+ (fparams_all, trainparams_all, trainfun, ftype); diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/tests_evals/mlr/mlr_fullstobgenrefeatures_nonclusteredsim_ISMIR12.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/tests_evals/mlr/mlr_fullstobgenrefeatures_nonclusteredsim_ISMIR12.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,48 @@ +% --- +% vary feature parameters of mixed features +% --- +% +ftype = 'MTTMixedFeatureStober11Genre'; + +fparams_all = struct(... + ... % --- + ... % these are Stober11 parameters + ... % --- + 'stob_lowaudio', 1, ... + 'stob_highaudio', 1, ... % + 'stob_tags', 1, ... + 'stob_norm', 1, ... + ... % --- + ... % following are GenreBasic parameters + ... % --- + 'pct_genres', 1, ... % 1/100 percentile genre tags used + 'empty_genres', 0 ... % allow empty genres to persist + ); + + +% --- +% vary parameters for mlr +% --- +trainparams_all = struct(... + 'C', [10^12], ... % best: 10^12: 75,58 + 'REG',[1], ... + 'LOSS',{{'AUC'}}, ... + 'k', 0, ... + 'Diagonal', [0],... + 'weighted', [0], ... + 'B', 0, ... + 'ConstrClock', [20], ... %def: 20 + 'E', [1e-3], ... % def: 1e-3 + 'dataset',{{'comp_partBinData_unclustered_ISMIR12_01'}} ... + ); + +% set training function +trainfun = @mlr_wrapper; + + +akt_dir = migrate_to_test_dir(); + +% call eval +out = test_generic_features_parameters_crossval... + (fparams_all, trainparams_all, trainfun, ftype); + diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/tests_evals/mlr/mlr_wrapper.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/tests_evals/mlr/mlr_wrapper.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,42 @@ +function [A, diag] = mlr_wrapper(X, Y, mlrparams) +% wrapper to make MLR accessible via a general interface + +% --- +% transform params +% --- + +% --- +% transform ranking data +% --- + +% --- +% call main training function +% --- + +% --- +% NOTE: WE use Y(:,3) for repeating rankings +% which have weight > 1 +% --- +if isfield(mlrparams,'weighted') && mlrparams.weighted + if mlrparams.weighted > 1 + + % --- + % NOTE: this is to maintain a tradeoff between weighting + % and space usage : + % + % scale the rating if the "weighted" parameter + % gives a maximum weight + % + % TODO: try logarithmic weight scaling + % --- + Y = scale_ratings(Y, mlrparams.weighted); + end + + [Y, X] = mlr_repeat_YX_by_rating(Y, X); +end + +% training step +[A, diag.Xi, diag.D] = mlr_train(X, Y(:,1:2),... + mlrparams.C, mlrparams.LOSS, mlrparams.k,... + mlrparams.REG, mlrparams.Diagonal, mlrparams.B, ... 
+ mlrparams.ConstrClock, mlrparams.E); \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/tests_evals/random_diag_wrapper.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/tests_evals/random_diag_wrapper.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,10 @@ +function [A, diag] = random_diag_wrapper(X, Y, trainparams) +% dummy wrapper to plug a random distance measure into the +% testing process + +% --- +% call main training function +% --- +A = diag(rand(1, size(X,1))); + +diag.trainfun = 'random_diag'; \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/tests_evals/svm_light/svm_test_stobermyfeat_ismir12.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/tests_evals/svm_light/svm_test_stobermyfeat_ismir12.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,41 @@ +global globalvars; +globalvars.debug = 3; + +% --- +% vary feature parameters of mixed features +% --- +% +ftype = 'MTTMixedFeatureStober11Genre'; + +fparams_all = struct(... + ... % --- + ... % these are Stober11 parameters + ... % --- + 'stob_lowaudio', 1, ... + 'stob_highaudio', 1, ... % + 'stob_tags', [1], ... % percent + 'stob_norm', 1, ... + ... % --- + ... % following are GenreBasic parameters + ... % --- + 'pct_genres', [1], ... % 1/100 percentile genre tags used: 6 percent + 'empty_genres', 0 ... % allow empty genres to persist + ); + +% --- +% vary parameters for svmlight +% --- +trainparams_all = struct(... + 'C', [0.7], ... % set 1 : 1.0; set2 :1.9 + 'weighted', [0], ... + 'dataset',{{'comp_partBinData_ISMIR12_01'}} ... + ); + +% set training function +trainfun = @svmlight_wrapper; + +akt_dir = migrate_to_test_dir(); + +% call eval : changed to generic +out = test_generic_features_parameters_crossval... + (fparams_all, trainparams_all, trainfun, ftype); diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/tests_evals/svm_light/svm_test_stobermyfeat_unclustered_ismir12.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/tests_evals/svm_light/svm_test_stobermyfeat_unclustered_ismir12.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,41 @@ + + +% --- +% vary feature parameters of mixed features +% --- +% +ftype = 'MTTMixedFeatureStober11Genre'; + +fparams_all = struct(... + ... % --- + ... % these are Stober11 parameters + ... % --- + 'stob_lowaudio', 1, ... + 'stob_highaudio', 1, ... % + 'stob_tags', [1], ... % percent + 'stob_norm', 1, ... + ... % --- + ... % following are GenreBasic parameters + ... % --- + 'pct_genres', [1], ... % 1/100 percentile genre tags used: 6 percent + 'empty_genres', 0 ... % allow empty genres to persist + ); + +% --- +% vary parameters for svmlight +% --- +trainparams_all = struct(... + 'C', [0.7], ... % set 1 : 1.0 set2 :2.x + 'weighted', [0], ... + 'dataset',{{'comp_partBinData_unclustered_ISMIR12_01'}} ... + ); + +% set training function +trainfun = @svmlight_wrapper; + +akt_dir = migrate_to_test_dir(); + +% call eval : changed to generic +out = test_generic_features_parameters_crossval... 
+ (fparams_all, trainparams_all, trainfun, ftype); + diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/tests_evals/svm_light/svmlight_wrapper.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/tests_evals/svm_light/svmlight_wrapper.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,68 @@ +function [A, diag] = svmlight_wrapper(X, Ytrain, trainparams) +% wrapper to make ITML accessible via a general interface + +% define model file +modelfile = 'tmp_model.dat'; +% define constraints file +trainfile = 'tmp_train.dat'; + +% --- +% include weighting from Y +% --- +if isfield(trainparams,'weighted') && trainparams.weighted + if trainparams.weighted > 1 + + % --- + % NOTE: this is to maintain a tradeoff between weighting + % and space usage : + % + % scale the rating if the "weighted" parameter + % gives a maximum weight + % + % TODO: try logarithmic weight scaling + % --- + Ytrain = scale_ratings(Ytrain, trainparams.weighted); + end + + % --- + % get squared pointwise distance for labeled features + % --- + [lhs, rhs, c] = get_svmlight_inequalities_from_ranking(Ytrain, X); + + % save to data file to disk; + success = save_svmlight_inequalities(lhs, rhs, c, trainfile); +else + % --- + % get squared pointwise distance for labeled features + % --- + [lhs, rhs] = get_svmlight_inequalities_from_ranking(Ytrain, X); + success = save_svmlight_inequalities(lhs, rhs, trainfile); + +end + +if ~success + error 'cannot write svm training file' +end + +% call svmlight solver +% D = dos(sprintf('svm_learn -z o -c %d %s %s', C, trainfile, modelfile)); +[diag, ~] = evalc('dos(sprintf(''svm_learn -z o -c %d %s %s'', trainparams.C, trainfile, modelfile));'); + +% Strip some dots from the display +diag = strrep(diag,'.......',''); +cprint(2, diag) + +% --- +% get dual weight vector +% TODO: check whether this the actual w in Schultz2003 +% --- +w = svmlight2weight(modelfile); + +% prolong w to feature dimension +w = [w; zeros(size(X,1) - numel(w),1)]; + +% --- +% matrix from weights +% --- +A = spdiags(w, 0, numel(w), numel(w)); +end diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/tests_evals/test_generic_display_results.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/tests_evals/test_generic_display_results.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,392 @@ +function [out, stats, features, individual] = test_generic_display_results(file) +% [out, stats] = test_generic_display_results() +% +% [out, stats, features, individual] = test_generic_display_results() +% +% displays the finalresults mat file and enables +% further analysis and duiagnostics of the individual runs + +global comparison; +global comparison_ids; + +if nargin < 1 || isempty(file) || isnumeric(file) + u = dir(); + u = {u.name}; + [idx, strpos] = substrcellfind(u, '_finalresults.mat', 1); + + if numel(idx) < 1 + error 'This directory contains no valid test data'; + end + + if exist('file','var') && isnumeric(file) + file = u{idx(file)}; + else + if numel(idx) > 1 + file = u{idx(ask_dataset())}; + else + file = u{idx(1)}; + end + end +end + + +% --- +% LOAD THE RESULT DATA +% We have: +% Y +% out.fparams +% trainparams +% dataPartition +% mean_ok_test +% var_ok_test +% mean_ok_train +% --- +load(file); + +% compability +if isfield(out, 'mlrparams') + for i = 1:numel(out) + out(i).trainparams = out(i).mlrparams; + end +end + + +% --- +% % get statistics for feature parameters +% Visualise the accuracy and variance +% --- +if isfield(out, 'inctrain') + for i = 1:numel(out) + + figure; + 
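% ---
% NOTE: the boxplot call below resolves to the file-local helper
% defined later in this file (grouped bars of the train/test means
% with errorbar whiskers), not the Statistics Toolbox boxplot.
% ---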
boxplot([out(i).inctrain.mean_ok_test], sqrt([out(i).inctrain.var_ok_test]), [out(i).inctrain.mean_ok_train]); + set(gca,'XTick',1:numel(out(i).inctrain.trainfrac), ... + 'XTickLabel', out(i).inctrain.trainfrac* 100); + + xlabel ('fraction of training data'); + title (sprintf('increasing training size test, config %d',i)); + legend('train', 'train weighted', 'test', 'test weighted'); + + end +end + + +if numel([out.mean_ok_test]) > 1 + + % plot means % plot std = sqrt(var) % plot training results + figure; + boxplot([out.mean_ok_test], sqrt([out.var_ok_test]), [out.mean_ok_train]); + title (sprintf('Performance for all configs')); +end + + +% --- +% write max. test success +% --- +mean_ok_test = [out.mean_ok_test]; +[val, idx] = max(mean_ok_test(1,:)); +fprintf(' --- Maximal test set success: nr. %d, %3.2f percent. --- \n', idx, val * 100) + +% --- +% display parameter statistics +% --- +stats = test_generic_display_param_influence(out); + + +if nargout < 3 + return; +end +% --- +% display statistics and get features +% for run with best test success +% --- +[resfile, featfile] = get_res_filename(out, idx); + +% --- +% import features: +% 1. reset databse +% 2. import features +% 3. assign to clip ids as in ranking +% --- +type = MTTAudioFeatureDBgen.import_type(featfile); +db_name = MTTAudioFeatureDBgen.db_name(type); +eval(sprintf('global %s', db_name)); +eval(sprintf('%s.reset();', db_name)); +eval(sprintf('features = %s.import(featfile);', db_name)); + +if isfield(out,'clip_ids') + clips = MTTClip(out(1).clip_ids); + features = clips.features(type); +end + +% --- +% Display Metric Stats +% tmp = test_mlr_display_metric_stats(individual.out, individual.diag, features); +% --- + +if nargout < 4 + return; +end +individual = load(resfile); +for i = 1:numel(out) + + [resfile, featfile] = get_res_filename(out, idx); + + % reset db and load testing features + eval(sprintf('global %s', db_name)); + eval(sprintf('%s.reset();', db_name)); + eval(sprintf('%s.import(featfile);', db_name)); + + % load individual results + if i == 1; + + individual = load(resfile); + else + + individual(i) = load(resfile); + end +end +end + +function out = ask_dataset() +% --- +% displays the parameters of the datasets, +% and asks for the right one to display +% --- +clc; +u = dir(); +u = {u.name}; +[idx, strpos] = substrcellfind(u, '_params.mat', 1); + +for i = 1:numel(idx) + file = u{idx(i)}; + fprintf('------------ Dataset nr. %d --------------',i); + type(file); +end + +out = (input('Please choose the dataset number: ')); +end + + +function [resfile, featfile] = get_res_filename(out, i) +% get filename given test results and index + + paramhash = hash(xml_format(out(i).fparams),'MD5'); + + paramhash_mlr = hash(xml_format(out(i).trainparams),'MD5'); + + featfile = sprintf('runlog_%s_feat.mat', paramhash); + + resfile = sprintf('runlog_%s.%s_results.mat',... 
+ paramhash, paramhash_mlr); +end + + +function boxplot(mean, std, train); + + bar([train; mean]', 1.5); + hold on; + errorbar(1:size(mean,2), mean(1,:), std(1,:),'.'); +% plot(train,'rO'); + colormap(spring); + axis([0 size(mean,2)+1 max(0, min(min([train mean] - 0.1))) max(max([train mean] + 0.1))]); +end + +function stats = test_generic_display_param_influence(results) +% returns the mean accuracy influence of each feature and training parameter +% +% the influence is measured by comparing the mean +% achieved accuracy for all tries with each specific +% parameter being constant +% +% TODO: evaluate how the comparisons of all configuration +% tuples twith just the specific analysed parameter +% changing differ from the above approach + +% get statistics for feature parameters +stats.fparams = gen_param_influence(results, 'fparams'); + +% get statistics for feature parameters +if isfield(results, 'trainparams') + + stats.trainparams = gen_param_influence(results, 'trainparams'); + + % the following case is for backwards compability +elseif isfield(results, 'mlrparams') + + stats.trainparams = gen_param_influence(results, 'mlrparams'); +end + +% display results +if ~isempty(stats.fparams) || ~isempty(stats.trainparams) + figure; +end + +if ~isempty(stats.fparams) + + subplot(2,1,1); + display_param_influence(stats.fparams); +end + +if ~isempty(stats.trainparams) + + subplot(2,1,2); + display_param_influence(stats.trainparams); +end + +end + +% --- +% gen_param_influence +% --- +function stats = gen_param_influence(results, paramname) +% generates statistics given results and parameter type as string. + +% get individual fields of this parameter set +ptypes = fieldnames(results(1).(paramname)); + +for i = 1:numel(ptypes) + % --- + % get all individual configurations of this parameter. + % --- + allvals = [results.(paramname)]; + + % take care of string args + if ~ischar(allvals(1).(ptypes{i})) + + allvals = [allvals.(ptypes{i})]; + else + allvals = {allvals.(ptypes{i})}; + end + + % save using original parameter name + tmp = param_influence(results, allvals); + + if ~isempty(tmp) + stats.(ptypes{i}) = tmp; + end +end + +if ~exist('stats','var') + stats = []; +end + +end + + +% --- +% param_influence +% --- +function out = param_influence(results, allvals) +% give the influence (given results) for the parameter settings +% given in allvals. +% +% numel(results) = numel(allvals) + + % --- + % get all different settings of this parameter. + % NOTE: this might also work results-of the box for strings. + % not tested, below has to be changed ot cell / matrix notations + % --- + entries = unique(allvals); + + % just calculate for params with more than one option + if numel(entries) < 2 || ischar(entries) + + out = []; + return; + end + + % calculate statstics for this fixed parameter + for j = 1:numel(entries) + + % care for string parameters + if ~(iscell(allvals) && ischar(allvals{1})) + valid_idx = (allvals == entries(j)); + + % mean_ok_test + valid_ids = find(valid_idx); + else + valid_ids = strcellfind(allvals, entries{j}, 1); + end + % --- + % get the relevant statistics over the variations + % of the further parameters + % --- + + mean_ok_testval = []; + for i = 1:numel(valid_ids) + mean_ok_testval = [mean_ok_testval results(valid_ids(i)).mean_ok_test(1,:)]; + end + + mean_ok_test(j) = struct('max', max(mean_ok_testval), ... + 'min', min(mean_ok_testval), ... 
+ 'mean', mean(mean_ok_testval)); + end + + % --- + % get the statistics over the different values + % this parameter can hold + % --- + [best, absolute.best_idx] = max([mean_ok_test.max]); + [worst, absolute.worst_idx] = min([mean_ok_test.max]); + + % --- + % get differences: + difference.max = max([mean_ok_test.max]) - min([mean_ok_test.max]); + + % format output + out.entries = entries; + out.mean_ok_test = mean_ok_test; + out.difference = difference; + out.absolute = absolute; +end + + +% --- +% display +% --- +function a = display_param_influence(stats) + +if isempty(stats) + return; +end + +ptypes = fieldnames(stats); + +dmean = []; +dmax = []; +best_val = {}; +for i = 1:numel(ptypes) + + % serialise the statistics +% dmean = [dmean stats.(ptypes{i}).difference.mean]; + dmax = [dmax stats.(ptypes{i}).difference.max]; + best_val = {best_val{:} stats.(ptypes{i}).entries( ... + stats.(ptypes{i}).absolute.best_idx) }; + + % take care of string args + if isnumeric(best_val{i}) + lbl{i} = sprintf('%5.2f' ,best_val{i}); + else + lbl{i} = best_val{i}; + end +end + + +bar([dmax]'* 100); +colormap(1-spring); +% legend({'maximal effect on mean correctness'}) +xlabel('effect on max. correctness for best + worst case of other parameters'); +ylabel('correctness (0-100%)'); +a = gca; +set(a,'XTick', 1:numel(ptypes), ... + 'XTickLabel', ptypes); + +% display best param results +for i = 1:numel(ptypes) + text(i,0,lbl{i}, 'color','k'); +end + +end + diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/tests_evals/test_generic_features_parameters_crossval.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/tests_evals/test_generic_features_parameters_crossval.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,190 @@ +function out = test_generic_features_parameters_crossval... + (fparams_all, trainparams_all, trainfun, featuretype) +% +% this is a special function designed for the ISMIR 2012 +% publication. A fixed and partitioned similarity dataset is used, +% and the clip set as well as the features are SPECIFICALLY SELECTED +% to each combination of training and test sets. + +% svn hook +my_revision = str2double(substr('$Rev$', 5, -1)); +[ST,I] = dbstack(); +testscript = ST(end).name; + +global globalvars; +global comparison_ids; +eval(sprintf('global %s', MTTAudioFeatureDBgen.db_name(featuretype))); + +% --- +% get all combinations resulting from the above parameter set +% descriptions +% --- +fparams = param_combinations(fparams_all); + +trainparams = param_combinations(trainparams_all); + +% --- +% the clips: sorted by comparison_id +% --- +clips = MTTClip(comparison_ids); + +% --- +% provide some timing information +% --- +nruns = numel(fparams); +tatic = []; +tatoc = []; +ftoc = []; +mlrtic = []; +mlrtoc = []; +runs = 0; + +res = []; +% TEST +for i = 1:numel(fparams) + + % TIMING start + runs = runs + 1; + tatic(end+1) = cputime(); + + MTTAudioFeatureDBgen.reset_feature_dbs('exclude',{'db_magnaaudiofeat'}); +% eval('%s.reset;', MTTAudioFeatureDBgen.db_name(featuretype)); + + % extract features + try + % --- + % try loading old features with the same parameter hash. 
+ % we use the md5 hash to distinguish between features + % --- + paramhash = MTTAudioFeature.param_hash(featuretype, fparams(i)); + featfile = sprintf('runlog_%s_feat.mat', paramhash); + + if exist(featfile,'file') == 2 + + eval(sprintf('%s.import(featfile);', MTTAudioFeatureDBgen.db_name(featuretype))); + + % --- + % Here, we make sure the clips are + % associated to the feature values + % --- + features = clips.features(featuretype, fparams(i)); + X = features.vector(); + + else + % --- + % we extract the mixed features, but leave the option of + % using no tags + % --- + features = clips.features(featuretype, fparams(i)); + features.define_global_transform(); + + % get the feature vector + X = features.vector(); + + % save features with specific filename to disc + xml_save(sprintf('runlog_%s_param.xml', paramhash), fparams(i)); + features.saveto(featfile); + end + + + % TIMING + ftoc(end+1) = cputime - tatic(end); + fprintf('Got features. took %2.2f minutes\n', ftoc(end) / 60); + catch err + + print_error(err); + + % --- + % TODO: save feature configuration and indicate faliure + % --- + if ~exist(paramhash) + paramhash = hash(xml_format(fparams(i)),'MD5'); + end + xml_save(sprintf('runlog_%s_param.xml', paramhash), fparams(i)); + xml_save(sprintf('runlog_%s_err.xml', paramhash), print_error(err)); + + continue; + end + + + % skip empty feature set; + if isempty(X) + continue; + end + % iterate over trainparams + for j = 1:numel(trainparams) + + % TIMING + mlrtic(end+1) = cputime; + paramhash_mlr = hash(xml_format(trainparams(j)),'MD5'); + + % --- + % Note: here we load the similarity data. + % this data is trunated if inctrain + % --- + simdata = load(trainparams(j).dataset); + if isfield(trainparams(j),'inctrain') && (trainparams(j).inctrain == 0) + + simdata.partBinTrn = simdata.partBinTrn(:,end); + simdata.partBinNoTrn = simdata.partBinNoTrn(:,end); + end + + % --- + % NOTE: THIS IS TRAINING + % call training function + % --- + [tmp] = do_test_rounds(trainfun, X, simdata, trainparams(j), fparams(i),... + paramhash, paramhash_mlr, clips); + + tmp.finfo = features(1).data.info; + tmp.fparams = features(1).my_params; + tmp.fparamhash = paramhash; + tmp.script = testscript; + + % TIMING + mlrtoc(end+1) = cputime - mlrtic(end); + % save result to result struct; + if ~isempty(tmp) + if isempty(res) + + res = tmp; + else + + res(end+1) = tmp; + end + + fprintf('Learned something: %2.2f perc. Took %2.2f minutes \n',... + max(max(tmp.mean_ok_test)) * 100, mlrtoc(end) / 60 ); + pause(0.5); + else + warning('Learned nothing \n'); + end + + % save output + out = res; + + % save final results + save(sprintf('runlog_%s_%s_finalresults.mat',... + hash(xml_format(fparams),'MD5'),... + hash(xml_format(trainparams),'MD5')... + ), 'out'); + + xml_save(sprintf('runlog_%s_%s_params.mat',... + hash(xml_format(fparams),'MD5'),... + hash(xml_format(trainparams),'MD5')... + ), struct('fparams', fparams_all, 'trainparams', trainparams_all) ); + + end + + % TIMING + clc; + tatoc(end+1) = cputime - tatic(end); + fprintf('%3.2f percent done, %2.2fh spent, %2.2fh to go. \n mlr / feature: %3.3f \n',... + (runs / nruns) *100 , sum(tatoc) / 3600,... + ((sum(tatoc) / runs) * (nruns - runs)) / 3600, mean(mlrtoc) / mean(ftoc) ); + fprintf('\nGo get a coffee, the next round will take %3.0f minutes \n',... 
+ (mean(mlrtoc) * numel(trainparams) + mean(ftoc)) / 60); +end +end + + diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/xml_parse_mtt.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/xml_parse_mtt.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,135 @@ +function features = xml_parse_mtt(file) +% features = xml_parse_mtt(file) +% +% parses the magnatagatune xml file to retrieve the audio +% analysis results. The file isexpected to use the deprecated +% EchoNest XML format +% +% time always travels an the horizontal axis + +% --- +% TODO: Add Error checking to certan tags as chroma number, +% loudness max etc. +% --- + +fdata = fileread(file); +try + % --- + % TODO: check for other than the p-code exceptions + % --- + + data = xml_parseany(fdata); +catch exception +end +%% ------------------------------------------------------------------------ +% --- +% copy general track data +% --- + +fields = fieldnames(data.track{1}.ATTRIBUTE); +for i = 1:length(fields) + features.(fields{i}) = str2double(data.track{1}.ATTRIBUTE.(fields{i})); +end + +%% ------------------------------------------------------------------------ +% --- +% get harmonic analysis. this is stored in segments +% --- +dsegments = data.track{1}.segments{1}.segment; + +for i = 1:numel(dsegments) + + % get start and duration + segments(i).start = str2double(dsegments{i}.ATTRIBUTE.start); + segments(i).duration = str2double(dsegments{i}.ATTRIBUTE.duration); + + % --- + % NOTE: for the chroma and mfcc features, we assume that the classes + % are always saved and parsed in correct order, thus we can afford to + % refrain from saving the class number with the class + % --- + + % assemble chroma features + segments(i).pitches = zeros(12,1); + for j = 1:12 + + segments(i).pitches(j) = str2double(... + dsegments{i}.pitches{1}.pitch{j}.CONTENT); + end + + % assemble mfcc features; + segments(i).timbre = zeros(numel(dsegments{i}.timbre{1}.coeff),1); + for j = 1:numel(dsegments{i}.timbre{1}.coeff) + + segments(i).timbre(j) = str2double(... + dsegments{i}.timbre{1}.coeff{j}.CONTENT); + end + + % get loudness measurements in dB and time + segments(i).loudness = str2double(... + dsegments{i}.loudness{1}.dB{1}.CONTENT); + + segments(i).loudness_time = str2double(... + dsegments{i}.loudness{1}.dB{1}.ATTRIBUTE.time); + + segments(i).loudness_max = str2double(... + dsegments{i}.loudness{1}.dB{2}.CONTENT); + + segments(i).loudness_max_time = str2double(... + dsegments{i}.loudness{1}.dB{2}.ATTRIBUTE.time); +end + +features.segments = segments; + +%% ------------------------------------------------------------------------ +% --- +% get sections +% --- +dsections = data.track{1}.sections{1}.section; + +secstart = zeros(1,numel(dsections)); +secduration = zeros(1,numel(dsections)); +for i = 1:numel(dsections) + sections(i).start = str2double(dsections{i}.ATTRIBUTE.start); + sections(i).duration = str2double(dsections{i}.ATTRIBUTE.duration); +end + +features.sections = sections; + +%% ------------------------------------------------------------------------ +% --- +% get beat and rythm data. the metric data is structured +% hierarchically, as each bar contains several beats, +% which contaisn several tatums. +% NOTE: Although the metrum and tempo have been evaluated and fixed +% on a global scale, the number of bars and tatum vary greatly. 
+% --- +dbars = data.track{1}.meter{1}.bar; + +for i = 1:numel(dbars) + + % get bar information + bars(i).confidence = str2double(dbars{i}.ATTRIBUTE.conf); + for j = 1:numel(dbars{i}.beat) + + % get beat information + bars(i).beat(j).confidence = str2double(dbars{i}.beat{j}.ATTRIBUTE.conf); + + for k = 1:numel(dbars{i}.beat{j}.tatum) + + % get tatum information + if ~isempty(dbars{i}.beat{j}.tatum{k}.ATTRIBUTE) + bars(i).beat(j).tatum(k).time = str2double(dbars{i}.beat{j}.tatum{k}.CONTENT); + bars(i).beat(j).tatum(k).confidence = str2double(dbars{i}.beat{j}.tatum{k}.ATTRIBUTE.conf); + + else + % save empty struct + bars(i).beat(j).tatum = struct([]); + end + + end + end +end + +features.bars = bars; + diff -r 000000000000 -r cc4b1211e677 core/magnatagatune/xml_write_tags.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/magnatagatune/xml_write_tags.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,18 @@ +% xmloutput_tags + +% Create a sample XML document. +docNode = com.mathworks.xml.XMLUtils.createDocument... + ('tags') +docRootNode = docNode.getDocumentElement; +for i=1:length(annots_descripts) + thisElement = docNode.createElement('tagname'); + thisElement.appendChild... + (docNode.createTextNode(annots_descripts{i}(1:end))); + docRootNode.appendChild(thisElement); +end +%docNode.appendChild(docNode.createComment('this is a comment')); + +% Save the sample XML document. +xmlFileName = ['test','.xml']; +xmlwrite(xmlFileName,docRootNode); +edit(xmlFileName); \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/startup_music_research.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/startup_music_research.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,63 @@ +% --- +% startup.m +% this shall become the general camir startup +% --- + +% global globalvars; + +% use this revision numbers for figures etc! 
+% globalvars.camir.revision = str2double(substr('$Rev$', 5, -1)); +% rev = textscan(evalc('!svnversion'),'%d%s'); +% globalvars.camir.revision = double(rev{1}); + +globalvars.camir.path = pwd; +globalvars.camir.revision = camirversion; + +clear rev +globalvars.debug = 2; + + +% load database +global comparison; +global comparison_ids; +load ('db.mat', 'comparison','comparison_ids','comparison_names'); + +warning off MATLAB:class:cannotUpdateClass + +% initialize clip and feature databases + +global db_magnaclips; +db_magnaclips = MTTClipDB(); + +global db_magnaaudiofeat; +db_magnaaudiofeat = MTTAudioFeatureDBgen('MTTAudioFeatureRAW'); + +global db_magnaaudiofeat_basicsm; +db_magnaaudiofeat_basicsm = MTTAudioFeatureDBgen('MTTAudioFeatureBasicSm'); + +global db_magnatagfeat_genrebasic; +db_magnatagfeat_genrebasic = MTTAudioFeatureDBgen('MTTTagFeatureGenreBasic'); + +global db_magnamixedfeat_genrebasicsm; +db_magnamixedfeat_genrebasicsm = MTTAudioFeatureDBgen('MTTMixedFeatureGenreBasicSm'); + +global db_MTTMixedFeatureGenreBasicSmPCA; +db_MTTMixedFeatureGenreBasicSmPCA = MTTAudioFeatureDBgen('MTTMixedFeatureGenreBasicSmPCA'); + +global db_MTTAudioFeatureSlaney08; +db_MTTAudioFeatureSlaney08 = MTTAudioFeatureDBgen('MTTAudioFeatureSlaney08'); + +global db_MTTMixedFeatureSlaney08GenreBasicSm; +db_MTTMixedFeatureSlaney08GenreBasicSm = MTTAudioFeatureDBgen('MTTMixedFeatureSlaney08GenreBasicSm'); + +global db_MTTMixedFeatureSlaney08GenreBasicSmPCA; +db_MTTMixedFeatureSlaney08GenreBasicSmPCA = MTTAudioFeatureDBgen('MTTMixedFeatureSlaney08GenreBasicSmPCA'); + +global db_MTTMixedFeatureGenreRandom; +db_MTTMixedFeatureGenreRandom = MTTAudioFeatureDBgen('MTTMixedFeatureGenreRandom'); + +global db_MTTMixedFeatureStober11Genre; +db_MTTMixedFeatureStober11Genre = MTTAudioFeatureDBgen('MTTMixedFeatureStober11Genre'); + +global db_MTTMixedFeatureStober11Slaney08GenreBasicSm; +db_MTTMixedFeatureStober11Slaney08GenreBasicSm = MTTAudioFeatureDBgen('MTTMixedFeatureStober11Slaney08GenreBasicSm'); diff -r 000000000000 -r cc4b1211e677 core/tools/CQueue.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/CQueue.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,160 @@ +classdef CQueue < handle +% CQueue define a queue data strcuture +% +% It likes java.util.Queue, however, it could use CQueue.content() to +% return all the data (in cells) of the Queue, and it is a litter faster +% than java's Queue. +% +% q = CQueue(c); c is a cells, and could be omitted +% s.size() return the numble of element +% s.isempty() return true when the queue is empty +% s.empty() delete all the elements in the queue. +% s.push(el) push el to the top of qeueu +% s.pop() pop out the the beginning of queue, and return the element +% s.front() return the the the beginning element of the qeueu +% s.back() return the the the rear element of the qeueu +% s.remove() remove all data from the queue +% s.content() return all the data of the queue (in the form of a +% cells with size [s.size(), 1] +% +% See also CList, CStack +% Copyright: zhang@zhiqiang.org, 2010. 
+% url: http://zhiqiang.org/blog/it/matlab-data-structures.html + + properties (Access = private) + buffer % a cell, to maintain the data + beg % the start position of the queue + rear % the end position of the queue + % the actually data is buffer(beg:rear-1) + end + + properties (Access = public) + capacity % Õ»µÄÈÝÁ¿£¬µ±ÈÝÁ¿²»¹»Ê±£¬ÈÝÁ¿À©³äΪ2±¶¡£ + end + + methods + function obj = CQueue(c) % ³õʼ»¯ + if nargin >= 1 && iscell(c) + obj.buffer = [c(:); cell(numel(c), 1)]; + obj.beg = 1; + obj.rear = numel(c) + 1; + obj.capacity = 2*numel(c); + elseif nargin >= 1 + obj.buffer = cell(100, 1); + obj.buffer{1} = c; + obj.beg = 1; + obj.rear = 2; + obj.capacity = 100; + else + obj.buffer = cell(100, 1); + obj.capacity = 100; + obj.beg = 1; + obj.rear = 1; + end + end + + function s = size(obj) % ¶ÓÁг¤¶È + if obj.rear >= obj.beg + s = obj.rear - obj.beg; + else + s = obj.rear - obj.beg + obj.capacity; + end + end + + function b = isempty(obj) % return true when the queue is empty + b = ~logical(obj.size()); + end + + function s = empty(obj) % clear all the data in the queue + s = obj.size(); + obj.beg = 1; + obj.rear = 1; + end + + function push(obj, el) % ѹÈëÐÂÔªËØµ½¶Óβ + if obj.size >= obj.capacity - 1 + sz = obj.size(); + if obj.rear >= obj.front + obj.buffer(1:sz) = obj.buffer(obj.beg:obj.rear-1); + else + obj.buffer(1:sz) = obj.buffer([obj.beg:obj.capacity 1:obj.rear-1]); + end + obj.buffer(sz+1:obj.capacity*2) = cell(obj.capacity*2-sz, 1); + obj.capacity = numel(obj.buffer); + obj.beg = 1; + obj.rear = sz+1; + end + obj.buffer{obj.rear} = el; + obj.rear = mod(obj.rear, obj.capacity) + 1; + end + + function el = front(obj) % ·µ»Ø¶ÓÊ×ÔªËØ + if obj.rear ~= obj.beg + el = obj.buffer{obj.beg}; + else + el = []; + warning('CQueue:NO_DATA', 'try to get data from an empty queue'); + end + end + + function el = back(obj) % ·µ»Ø¶ÓÎ²ÔªËØ + + if obj.rear == obj.beg + el = []; + warning('CQueue:NO_DATA', 'try to get data from an empty queue'); + else + if obj.rear == 1 + el = obj.buffer{obj.capacity}; + else + el = obj.buffer{obj.rear - 1}; + end + end + + end + + function el = pop(obj) % µ¯³ö¶ÓÊ×ÔªËØ + if obj.rear == obj.beg + error('CQueue:NO_Data', 'Trying to pop an empty queue'); + else + el = obj.buffer{obj.beg}; + obj.beg = obj.beg + 1; + if obj.beg > obj.capacity, obj.beg = 1; end + end + end + + function remove(obj) % Çå¿Õ¶ÓÁÐ + obj.beg = 1; + obj.rear = 1; + end + + function display(obj) % ÏÔʾ¶ÓÁÐ + if obj.size() + if obj.beg <= obj.rear + for i = obj.beg : obj.rear-1 + disp([num2str(i - obj.beg + 1) '-th element of the stack:']); + disp(obj.buffer{i}); + end + else + for i = obj.beg : obj.capacity + disp([num2str(i - obj.beg + 1) '-th element of the stack:']); + disp(obj.buffer{i}); + end + for i = 1 : obj.rear-1 + disp([num2str(i + obj.capacity - obj.beg + 1) '-th element of the stack:']); + disp(obj.buffer{i}); + end + end + else + disp('The queue is empty'); + end + end + + function c = content(obj) % È¡³ö¶ÓÁÐÔªËØ + if obj.rear >= obj.beg + c = obj.buffer(obj.beg:obj.rear-1); + else + c = obj.buffer([obj.beg:obj.capacity 1:obj.rear-1]); + end + end + end +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/tools/CStack.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/CStack.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,108 @@ +classdef CStack < handle +% CStack define a stack data strcuture +% +% It likes java.util.Stack, however, it could use CStack.content() to +% return all the data (in cells) of the Stack, and it is a litter faster +% than java's 
Stack. +% +% s = CStack(c); c is a cells, and could be omitted +% s.size() return the numble of element +% s.isempty() return true when the stack is empty +% s.empty() delete the content of the stack +% s.push(el) push el to the top of stack +% s.pop() pop out the top of the stack, and return the element +% s.top() return the top element of the stack +% s.remove() remove all the elements in the stack +% s.content() return all the data of the stack (in the form of a +% cells with size [s.size(), 1] +% +% See also CList, CQueue +% +% Copyright: zhang@zhiqiang.org, 2010. +% url: http://zhiqiang.org/blog/it/matlab-data-structures.html + + properties (Access = private) + buffer % Ò»¸öcellÊý×飬±£´æÕ»µÄÊý¾Ý + cur % µ±Ç°ÔªËØÎ»ÖÃ, or the length of the stack + capacity % Õ»µÄÈÝÁ¿£¬µ±ÈÝÁ¿²»¹»Ê±£¬ÈÝÁ¿À©³äΪ2±¶¡£ + end + + methods + function obj = CStack(c) + if nargin >= 1 && iscell(c) + obj.buffer = c(:); + obj.cur = numel(c); + obj.capacity = obj.cur; + elseif nargin >= 1 + obj.buffer = cell(100, 1); + obj.cur = 1; + obj.capacity =100; + obj.buffer{1} = c; + else + obj.buffer = cell(100, 1); + obj.capacity = 100; + obj.cur = 0; + end + end + + function s = size(obj) + s = obj.cur; + end + + function remove(obj) + obj.cur = 0; + end + + function b = empty(obj) + b = obj.cur; + obj.cur = 0; + end + + function b = isempty(obj) + b = ~logical(obj.cur); + end + + function push(obj, el) + if obj.cur >= obj.capacity + obj.buffer(obj.capacity+1:2*obj.capacity) = cell(obj.capacity, 1); + obj.capacity = 2*obj.capacity; + end + obj.cur = obj.cur + 1; + obj.buffer{obj.cur} = el; + end + + function el = top(obj) + if obj.cur == 0 + el = []; + warning('CStack:No_Data', 'trying to get top element of an emtpy stack'); + else + el = obj.buffer{obj.cur}; + end + end + + function el = pop(obj) + if obj.cur == 0 + el = []; + warning('CStack:No_Data', 'trying to pop element of an emtpy stack'); + else + el = obj.buffer{obj.cur}; + obj.cur = obj.cur - 1; + end + end + + function display(obj) + if obj.cur + for i = 1:obj.cur + disp([num2str(i) '-th element of the stack:']); + disp(obj.buffer{i}); + end + else + disp('The stack is empty'); + end + end + + function c = content(obj) + c = obj.buffer(1:obj.cur); + end + end +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/tools/DiGraph.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/DiGraph.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,853 @@ + +% GENERAL GRAPH THEORY CLASS: DiGraph +% +% CAVE: Any special application oriented stuff +% should be outside here + +classdef DiGraph < handle + +properties + + V = sparse(0,1); % boolean for nodes + Vlbl = {}; + + E = sparse(0,0); % sparse weighted edge graph +end + +methods + +% --- +% Constructor +% --- +function G = DiGraph(V, E, Vlbl) + + if nargin == 1 + if ~isnumeric(V) + + G = DiGraph.copy(V); + else + % build graph from Edge Adjacency Matrix + G.V = sparse(find(sum(V + V')) > 0); + G.E = V; + end + end + + if nargin >= 2 + G = DiGraph(); + + if numel(V) ~= max(size(E)) + error 'wrong dimensions'; + end + + % --- + % We copy existent nodes and edges + % --- + if numel(V) == size(V,2) + G.V = V; + else + G.V = V'; + end + G.E = E; + + % --- + % We copy the labels for existent nodes, + % if supplied. + % NOTE: the FIND call is neccessary, as logical indexing + % does not work in this case + % TODO: WHY NOT? 
+ % --- + if (nargin == 3) && ~isempty(Vlbl) + G.Vlbl = cell(numel(G.V),1); + G.Vlbl(find(G.V)) = Vlbl(find(G.V)); + end + + end +end + +% get list of node ids +function out = nodes(G) + out = find(G.V); +end + +% get list of node ids +function out = last_node(G) + out = find(G.V,1,'last'); +end + +function w = edge(G, V1, V2) + + w = G.E(V1, V2); +end + +% get edge list representation +function [val, V1, V2] = edges(G, V1) + + if nargin == 1 + + % get all edges of the graph + [V1, V2] = find(G.E); + + val = zeros(numel(V1), 1); + + for i = 1:numel(V1) + val(i) = G.E(V1(i), V2(i)); + end + + elseif nargin == 2 + + % get all edges coming from or entering this node + V2 = find(G.E(V1, :)); + val = G.E(V1, V2); + + V2 = [ V2 find(G.E(:,V1))']; + val = [ val G.E(V2, V1)']; + end +end + +% compare two graphs +function out = eq(a,b) + + % --- + % compare graph stats + % necessary + % --- + if a.order ~= b.order + out = false; + cprint(3, 'eq: graph node numbers do not match'); + return + end + + if a.num_edges ~= b.num_edges + out = false; + cprint(3, 'eq: graph edge numbers do not match'); + return + end + + + % --- + % compare labels and reindex graph b if + % necessary + % --- + lbla = a.label(); + lblb = b.label(); + + for i = 1:numel(lbla) + if ~isempty(lbla{i}) + tmpidx = strcellfind(lblb, lbla{i}); + + % check for substring problems + for j = 1:numel(tmpidx) + if strcmp(lblb{tmpidx(j)}, lbla{i}) + idx(i) = tmpidx(j); + break; + end + end + end + end + + % test on found labels + num_matching_lbl = sum(idx > 0); + if ~isempty(lbla) && (num_matching_lbl < a.order) + out = false; + cprint(3, 'eq: graph labels do not match'); + return + end + + % --- + % reindex edges and nodes: only replace valid indexes + % --- + val_idx = idx > 0; + idx = idx(val_idx); + + bE(val_idx,val_idx) = b.E(idx,idx); + bV(val_idx) = b.V(idx); + + if sum(a.V ~= bV) > 0 + out = false; + cprint(3, 'eq: nodes do not match'); + return + end + + if sum(sum(a.E ~= bE)) > 0 + out = false; + cprint(3, 'eq: edges do not match'); + return + end + + % --- + % OK, seems these Graphs are the same!!! 
+ % --- + out = true; + +end + +% find node via its label +function Vid = node(G, label) + lbl = G.label(); + + Vid = strcellfind(lbl, label,1); +end + +% add a node +function add_node(G,V) + + % set node indicator + G.V(V) = 1; + + G.E(V,V) = 0; +end + +% remove a node +function remove_node(G,V) + + % remove node + G.V(V) = 0; + + % remove all edges + G.E(V,:) = 0; + G.E(:,V) = 0; +end + +% remove an edge +function remove_edge(G, V1, V2) + + G.E(V1, V2) = 0; +end + +% add an edge +function add_edge(G, V1, V2, weight) + + G.add_node(V1); + G.add_node(V2); + + if G.E(V1,V2) == 0 + + % save weight + set_edge(G, V1, V2, weight); + else + + join_edge(G, V1, V2, weight) + end +end + +% --- +% implementation of edge joining, +% to be overwritten for several +% purposes +% --- +function join_edge(G, V1, V2, weight) + + set_edge(G, V1, V2, G.edge(V1, V2) + weight); +end + +% --- +% sets the edge without considering earlier weights +% --- +function set_edge(G, V1, V2, weight) + + G.E(V1, V2) = weight; +end + +% --- +% Graph-specific functions +% --- +function c = children(G, Vid) + + c = find(G.E(Vid,:) > 0); +end + +function p = parents(G, Vid) + + p = find(G.E(:,Vid) > 0)'; +end + +% --- +% Vertex Degree +% --- +function out = degree(G, V) + + out = sum(G.E(V,:) > 0) + sum(G.E(:,V) > 0); +end + +% --- +% Vertex Degree +% --- +function out = degree_in(G, V) + + out = sum(G.E(V,:) > 0); +end + +% --- +% Max Degree In +% --- +function out = max_degree_in(G) + + out = max(sum(G.E > 0, 2)); +end + +% --- +% Vertex Degree +% --- +function out = degree_out(G, V) + + out = sum(G.E(:,V) > 0); +end + +% --- +% Max Degree In +% --- +function out = max_degree_out(G) + + out = max(sum(G.E > 0, 1)); +end + + +% --- +% Max Degree +% --- +function out = max_degree(G) + + out = max(sum(G.E > 0, 1) + sum(G.E > 0, 2)'); +end + +% --- +% Max weight +% --- +function out = max_weight(G) + + out = max(max(G.E)); +end + +% --- +% Number of Vertices in Graph +% --- +function out = order(G) + out = sum(G.V); +end + +% --- +% Number of Edges in Graph +% --- +function out = num_edges(G) + out = sum(sum(G.E ~= 0)); +end + +% --- +% Number of Vertices in Graph +% --- +function out = cardinality(G) + out = order(G); +end + +function Gs = unconnected_subgraph(G) + Vtmp = (sum(G.E + G.E') == 0); + Etmp = sparse(size(G.E, 1), size(G.E, 1)); + + Gs = DiGraph(Vtmp, Etmp, G.label()); +end + +% --- +% return string labels for a (set of) node(S) +% --- +function out = label(G, Vid) + + out = {}; + if nargin == 1 + % maybe much faster for whole graph + if (numel(G.Vlbl) < G.order() || isempty(G.Vlbl)) + + out = cell(numel(G.V), 1); + for i = 1:numel(Vid) + out{i} = sprintf('%d', Vid(i)); + end + else + out = G.Vlbl; + end + + elseif nargin == 2 + for i = 1:numel(Vid) + + if (numel(G.Vlbl) < Vid(i)) || isempty(G.Vlbl{Vid(i)}) + + if numel(Vid) > 1 + out{i} = sprintf('%d', Vid(i)); + else + out = sprintf('%d', Vid(i)); + end + else + if numel(Vid) > 1 + out{i} = G.Vlbl{Vid(i)}; + else + out = G.Vlbl{Vid(i)}; + end + end + end + end +end + + +% ----------------------------------------------------------------- +% Graph theory algorithms +% --- + + +% --- +% sets all the main diagonal edges to zero +% --- +function remove_cycles_length1(G) + + for i = G.nodes(); + G.E(i,i) = 0; + end +end + + +% --- +% Returns whether a given graph has cycles or not, +% using the SCC search +% --- +function out = isAcyclic(G) + % --- + % We get all the sccs in the DiGraph, and + % return true if none of the sccs has more than + % one node + % --- + + % 
check for self-loops + if sum(abs(diag(G.E))) > 0 + out = 0; + return; + end + + [~, s, ~] = strongly_connected_components(G); + + if max(s) < 2 + out = 1; + else + out = 0; + end +end + +% --- +% All SCC's ordered by number of nodes in graph +% +% this should also be able to return +% the SCC of just one node +% --- +function [Gs, s, id] = strongly_connected_components(G, Vin) + + % marking + marked = zeros(size(G.V)); + + % --- + % two stacks, + % one: has not been assigned to scc + % two: unclear if in same scc + % --- + stack1 = CStack(); + stack2 = CStack(); + + % initialise scc ids + id = zeros(size(G.V)) - 1; % assigned scc? + idctr = 1; + + % initialise graph ordering + preorder = zeros(G.order, 1); + prectr = 0; + + for v = G.nodes(); + if ~marked(v) + dfs(G, v); + end + end + + % --- + % create subgraphs (DiGraph here) for sccs + % --- + if nargin == 1 + + s = zeros(idctr-1,1); + for idctr2 = 1:idctr-1 + Vtmp = (id == idctr2); + Emask = sparse(size(G.E, 1), size(G.E, 1)); + Emask(Vtmp,Vtmp) = 1; + Etmp = G.E .* Emask; + + Gs(idctr2) = DiGraph(Vtmp, Etmp, G.Vlbl); + s(idctr2) = Gs(idctr2).order(); + end + + % --- + % order by number of nodes + % --- + [s, idx] = sort(s,'descend'); + Gs = Gs(idx); + Gmax = Gs(1); + + else + % --- + % get just the scc for the questioned node + % --- + Vtmp = (id == id(Vin)); + Emask = sparse(size(G.E, 1), size(G.E, 1)); + Emask(Vtmp,Vtmp) = 1; + Etmp = G.E .* Emask; + + Gs = DiGraph(Vtmp, Etmp); + s = Gs.order(); + end + + % --- + % NOTE: THIS IS A NESTED DFS BASED GRAPH ORDERING + % --- + function dfs(G, v) + + % mark this node as visited + marked(v) = 1; + + preorder(v) = prectr; + prectr = prectr + 1; + + % push into both stacks + stack1.push(v); + stack2.push(v); + + % --- + % dfs + % --- + for w = G.children(v) + if ~marked(w) + % --- + % traverse into dfs if not yet visited + % --- + dfs(G, w); + + elseif id(w) == -1 + + % --- + % w has not yet been assigned to a strongly connected + % component + % --- + while ((preorder(stack2.top()) > preorder(w))) + stack2.pop(); + end + end + end + + % --- + % found scc containing v + % --- + if (stack2.top() == v) + stack2.pop(); + + w = -1; + while (w ~= v) + + % --- + % collect all the nodes of this scc + % --- + w = stack1.pop(); + id(w) = idctr; + end + idctr = idctr + 1; + end + + end % function dfs +end + +function [Gs, s, id] = connected_components(G, varargin) + % --- + % get all connected subgraphs: + % --- + + % make edge matrix undirected + G2 = DiGraph(Graph(G)); + + [GsGraph, s, id] = strongly_connected_components(G2, varargin{:}); + + % get the actual subgraps + + for i =1:numel(GsGraph) + Gs(i) = G.subgraph(GsGraph(i).nodes); + end +end + +% --- +% creates new graph just containing the +% specified nodes and optionally specified edges +% nodes can be specified using +% a. the binary G.V structure +% b. or node indices +% --- +function G2 = subgraph(G, V, E) + if nargin == 2 + E = []; + end + + % --- + % create new graph as copy ofthe old + % --- + + G2 = feval(class(G), G); + + % --- + % reset nodes and edges + % NOTE: we determine the input format first + % --- + if (max(V) == 1 && numel(V) > 1) || max(V) == 0 + + G2.remove_node(find(~V)); + else + + G2.remove_node(setdiff(1:numel(G.V), V)); + end + if ~isempty(E) + G2.E = E; + end +end + +% --- +% joins the information of graph G2 with +% this GRaph, not duplicating nodes. 
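% (illustrative usage sketch; g1 and g2 are assumed, pre-existing
%  DiGraph objects: g1.add_graph(g2) copies g2's nodes, labels and
%  edge weights into g1, and when g2 is an undirected Graph the
%  symmetric edges are added as well)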
+% --- +function add_graph(G, G2) + + % determine if symmetric edges have to be added + clas = cat(1, class(G2), superclasses(G2)); + if strcellfind(clas, 'Graph') + add_symm = 1; + else + add_symm = 0; + end + + % --- + % add all nodes and edges in G2 + % --- + for V = G2.nodes(); + + G.add_node(V); + end + + % --- + % NOTE / TODO: + % this LABEL inheritance is far too expensive when + % creating many copiesof the same graph + % Its also unnessecary for lots of them + % except for debugging :(. + % --- + G.Vlbl = cell(1,numel(G2.V)); + G.Vlbl(G2.nodes()) = G2.label(G2.nodes()); + + % --- + % get all edges in G2 + % --- + [val, V1, V2] = edges(G2); + + for i = 1:numel(val) + + % --- + % add edges to graph + % --- + G.add_edge(V1(i), V2(i), val(i)); + + if add_symm + % --- + % add symmetric edges to graph + % --- + G.add_edge(V2(i), V1(i), val(i)); + end + + end +end + +% --- +% substracts the edges in G2 from +% this GRaph, removing not connected nodes. +% --- +function Gout = minus(a, b) + Gout = feval(class(a),a); + Gout.remove_graph(b); +end + +function remove_graph(G, G2) + + % determine if symmetric edges have to be added + clas = cat(1, class(G2), superclasses(G2)); + if strcellfind(clas, 'Graph') + symm = 1; + else + symm = 0; + end + + % remove edges + [val, V1, V2] = G2.edges(); + for j = 1:numel(val) + + % remove specified edges + G.remove_edge(V1(j), V2(j)); + + % remove symmetric edges if subtracting symm graph + if symm + + G.remove_edge(V2(j), V1(j)); + end + end + + % --- + % Note : we only remove nodes with no + % remaining incoming edges + % --- + V = G2.nodes(); + for j = 1:numel(V) + + if G.degree(V(j)) == 0 + + G.remove_node(V(j)); + end + end + +end + +% --- +% compact graph representation +% --- +function [E, labels] = compact(G) + + % --- + % get nodes and create a reverse index + % --- + Vidx = sparse(G.nodes,1,1:G.order()); + [w, V1, V2] = G.edges(); + + % create compact adjacency matrix + E = sparse(Vidx(V1), Vidx(V2),w, G.order(), G.order()); + + labels = G.label(G.nodes()); +end + +% --- +% determines if Edges in G2 are the same as in G +% --- +function [out] = isSubgraph(G, G2) + + [val, V1, V2] = G2.edges(); + validE = false(numel(V1), 1); + + i = 1; + while i <= numel(V1) + + % --- + % Test if edge exists in other graph + % --- + if G.edge(V1(i),V2(i)) == val(i) + out = 0; + return; + end + i = i +1 ; + end + + % --- + % Test labels + % --- + if ~isempty(G.Vlbl) && ~isempty(G2.Vlbl) + + V = G2.nodes(); + i = 1; + while i <= numel(V) + if strcmp(G.label(V(i)), G2.label(V(i))) ~= 0 + out = 0; + return; + end + i = i + 1; + end + end + + out = 1; +end + +% --- +% Visualise the Similarity Graph +% --- +function visualise(G) + + % --- + % get colormap for edge weights + % --- + cmap = jet(100); + + % --- + % NOTE: we now get the weight colors for all edges + % get maximum weight and all edges + % --- + [colors, V1, V2] = G.edges(); + w = G.max_weight(); + + % normalise colors + colors = max(1,round((colors / w) * 100)); + + % get node labels + V1lbl = G.label(V1); + V2lbl = G.label(V2); + + % --- + % compose edgecolor matrix + % --- + ec = cell(numel(V1), 3); + for i = 1:numel(V1) + ec(i,:) = {V1lbl{i}, V2lbl{i}, cmap(colors(i),:)}; + end + + % --- + % For Performance Issues + % We get the compact Graph and + % !hope! 
for the labels to correspond to those above + % --- + [E, labels] = compact(G); + + % determine if symmetric Graph + clas = cat(1, class(G), superclasses(G)); + if strcellfind(clas, 'Graph') + symm = 1; + else + symm = 0; + end + + graphViz4Matlab('-adjMat',E, ... + '-nodeLabels',labels, ... + '-edgeColors', ec, ... + '-undirected', symm); +end +end + + +methods (Static) + function G = copy(Gin) + + if strcmp(class(Gin), 'DiGraph') + + G = DiGraph(Gin.V, Gin.E); + G.Vlbl = Gin.Vlbl; + + else + warning ('cannot copy graph, casting instead') + G = DiGraph.cast(Gin); + end + + end + + function G = cast(Gin) + + % --- + % this uses an imput grpaph + % to create a new digraph + % --- + G = DiGraph(); + + % --- + % Add the input graph to the empty one + % --- + G.add_graph(Gin); + + end +end +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/tools/Graph.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/Graph.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,154 @@ +% GENERAL GRAPH THEORY CLASS +% +% CAVE: Any special application oriented stuff +% should be outside here + +classdef Graph < DiGraph + +methods + + % --- +% Constructor +% --- +function G = Graph(V, E) + + if nargin == 1 + % --- + % this uses an imput grpaph + % to create a new graph + % --- + G = Graph.copy(V); + + elseif nargin == 2 + + if numel(V) == size(V,2) + G.V = V; + else + G.V = V'; + end + + G.E = E; + end +end +function w = edge(G, V1, V2) + + sm = min([V1, V2]); + bg = max([V1, V2]); + + w = G.E(sm, bg); +end + +% --- +% add an edge +% the undirected case: G.E is triangular +% --- +function add_edge(G, V1, V2, weight) + + G.add_node(V1); + G.add_node(V2); + + sm = min([V1, V2]); + bg = max([V1, V2]); + + if G.E(sm, bg) == 0 + + % save weight + G.E(sm, bg) = weight; + else + + join_edge(G, V1, V2, weight); + end +end + +% remove an edge +function remove_edge(G, V1, V2) + + sm = min([V1, V2]); + bg = max([V1, V2]); + + G.E(sm,bg) = 0; +end + +% --- +% sets the edge without considering earlier weights +% --- +function set_edge(G, V1, V2, weight) + + sm = min([V1, V2]); + bg = max([V1, V2]); + + G.E(sm, bg) = weight; + +end + +% --- +% Graph-specific functions +% in the undirected case, these are the same +% --- +function c = children(G, Vid) + + c = [find(G.E(Vid,:) > 0) find(G.E(:,Vid) > 0)']; +end + +function p = parents(G, Vid) + + p = [find(G.E(Vid,:) > 0) find(G.E(:,Vid) > 0)']; +end + +% --- +% Vertex Degree +% --- +function out = degree(G, V) + + out = sum(G.E(V,:) > 0) + sum(G.E(:,V) > 0); +end + +% --- +% Vertex Degree +% --- +function out = degree_in(G, V) + + out = sum(G.E(V,:) > 0) + sum(G.E(:,V) > 0); +end + +% --- +% Vertex Degree +% --- +function out = degree_out(G, V) + + out = sum(G.E(:,V) > 0) + sum(G.E(:,V) > 0); +end + +end + + +methods (Static) + function G = copy(Gin) + + if strcmp(class(Gin), 'Graph') + + G = Graph(Gin.V, Gin.E); + G.Vlbl = Gin.Vlbl; + + else + warning ('cannot copy graph, casting instead') + G = Graph.cast(Gin); + end + + end + + function G = cast(Gin) + + % --- + % this uses an imput grpaph + % to create a new digraph + % --- + G = Graph(); + + % --- + % Add the input graph to the empty one + % --- + G.add_graph(Gin); + end +end +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/tools/NaiveHash.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/NaiveHash.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,40 @@ +classdef NaiveHash < handle + + properties + + last_key = 0; + map; + end + +% --- +% the methods +% --- 
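% ---
% TODO: the 'map' property above is never used; the constructor
% assigns ht.Map, and put() references last_key / ticketMap without
% the ht. prefix, so these names need to be reconciled before the
% class can be used.
% ---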
+methods + function ht = NaiveHash(data) + + ht.Map = containers.Map('KeyType', 'int32', 'ValueType', 'int32'); + end + + function put(ht, data) + + % get new key + last_key = last_key + 1; + ticketMap(data) = last_key; + end + + function data(ht, data) + + end + + function out = key(ht, data) + + out = ht.Map(data); + end + + function iskey(ht, key) + + end + +end + +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/tools/addTopXAxis.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/addTopXAxis.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,140 @@ +function [nAxis] = addTopXAxis (varargin) + +% [nAxis] = addTopXAxis (axisH, properties ...) +% Add an X-axis on top (additional to the one on bottom) of the figure, +% with its own ticks labels and axis label (allow double scale). +% Ticks, minor ticks positions and x limits are set to be identical for top and bottom +% axis. Moreover, they are linked, i.e. a modification of one of them, even after execution +% of this function, will result in modification of the other one. +% +% Parameters: +% axisH = handle for axes (dafault = current axes, given by |gca|) +% properties: syntax is two-folded arguments for each property, +% on the form: 'name of property' (case insensitive), value of property +% and properties can be +% xLabStr : the label for the top X-axis (string) +% default = '' +% expression or exp : expression for transforming bottom X-tick labels to +% top X-tick labels. In the expression, |argu| will refer +% to the value of the bottom X-tick labels. +% eg. going from |log10(x)| to linear |x| values will be done with +% the string '10.^argu' for expression. +% default = '' +% Note : if expression needs to use some variables, that are not accessible to this function, they need to be evaluated +% before being passed to this function (see exemples below). +% xTickLabelFormat : display format, used for converting numerical values for tick labels in strings. (string) +% The computation of top axis tick labels using |expression| can lead to numbers taking a lot of space +% because of the precision of the display. Reducing the precision will reduce the size of the labels. +% default = '%4.1f' +% +% Exemples: +% +% axisH = axes % create new axes, and save the handle in axisH +% addTopXAxis(axisH, 'expression', '-argu') % change the label of the ticks by their opposite +% +% +% addTopXAxis('xticklabel', '(k0.*10.^argu)', 'xlabel', '$\lambda_{up}$ (cm)') +% will use the current axis (handle is not passed to the function), and will compute the new X-tick values according to +% x' = k0.*10.^argu; +% where |k0| is a variable whose value has to be set in the 'base' workspace. + +% +% V2 - 11/27/2005 +% Modifs for V2: +% * - now evaluates expression in 'base' workspace. +% Therefore, variables (like 'k0' in the second example) do not need to be evaluated anymore before being passed, +% as long as they already exist in 'base' workspace. It should allow more complex expressions to be passed than previously. +% Example 2 then becomes +% addTopXAxis('expression', '(k0.*10.^argu)', 'xLabStr', '$\lambda_{up}$ (cm)') +% instead of +% addTopXAxis('expression', ['(', num2str(k0),'.*10.^argu)'], 'xLabStr', '$\lambda_{up}$ (cm)' +% Drawback: the function create/assign a variable called 'axisH' in base workspace. Potential conflicts here ... +% * - properties are not case sensitive anymore +% * - 'exp' can be used instead of 'expression' for property name (following John D'Errico comment, if I understood it well ....) 
+% +% +% Author : Emmanuel P. Dinnat +% Date : 09/2005 +% Contact: emmanueldinnat@yahoo.fr + +global hlink % make the properties link global + +%% Default values for properties +axisH = gca; +xTickLabelFormat = '%4.1f'; +xLabStr = ''; +expression = ''; + +%% Process input parameters (if they exist) +% if input parameters +if length(varargin) > 0 + if isstr(varargin{1}) % if no axes handle is passed ... + axisH = gca; + if (length(varargin) > 1) + properties = varargin(1:end); + end + else % else deal with passed axes handle + if ishandle(varargin{1}) + axisH = varargin{1}; + else + error('addTopXAxis : handle for axes invalid.') + end + properties = varargin(2:end); + end + if ~mod(length(properties),2) + for iArg = 1:length(properties)/2 + % switch properties{2*iArg-1} + switch lower(properties{2*iArg-1}) % modif V2 - suppress case sensitivity + case {'xticklabel','expression' , 'exp'} + expression = properties{2*iArg}; + % case 'xLabStr' + case {'xlabel','xlabstr'} % V2 + xLabStr = properties{2*iArg}; + % case 'xTickLabelFormat' + case 'xticklabelformat' % V2 + xTickLabelFormat = properties{2*iArg}; + otherwise + error(['addTopXAxis : property ''', properties{2*iArg-1},''' does not exist.']) + end + end + else + error('addTopXAxis : arguments number for proporties should be even.') + end +end % if input parameters +%% replace |argu| by x-tick labels in the computation expression for new x-tick labels +% newXtickLabel_command = regexprep(expression, 'argu', 'get(axisH, ''xTick'')'''); + +%% Get paramters of figures to be modified (other parameters to be copied on the new axis are not extracted) + set(axisH, 'units', 'normalized'); + cAxis_pos = get(axisH, 'position'); +%% shift downward original axis a little bit + cAxis_pos(2) = cAxis_pos(2)*0.8; + set(axisH, 'position', cAxis_pos); +%% Make new axis + nAxis = subplot('position', [cAxis_pos(1), (cAxis_pos(2)+cAxis_pos(4))*1.007, cAxis_pos(3), 0.0001]); +%% put new Xaxis on top + set(nAxis, 'xaxisLocation', 'top'); +%% Improve readability + %% delete Y label on new axis + set(nAxis, 'yTickLabel', []); + % remove box for original axis +% set(axisH, 'box', 'off'); + % remove grids + set(nAxis, 'yGrid', 'off'); + set(nAxis, 'xGrid', 'off'); +%% Set new Xaxis limits, ticks and subticks the same as original ones (by link) ... + set(nAxis, 'xlim', get(axisH, 'xlim')); + set(nAxis, 'XTick', get(axisH, 'XTick')); + set(nAxis, 'XMinorTick', get(axisH, 'XMinorTick')); + hlink = linkprop([nAxis, axisH], {'xLim','XTick','XMinorTick'}); +%% ... but replace ticks labels by new ones !!! 
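+ % expose the original axes handle to the 'base' workspace, where the
+ % user-supplied |expression| is meant to be evaluated (cf. the V2 notes)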
+ assignin('base', 'axisH', axisH) + % set(nAxis, 'xtickLabel', num2str(evalin('base', newXtickLabel_command), xTickLabelFormat)); + set(nAxis, 'xtickLabel', expression); + %% but label for new axis + if exist('xLabStr') + xlabel(xLabStr) + end +%% return current axis to original one (for further modification affecting original axes) + axes(axisH); +% hlink = linkprop([nAxis, gca], {'xLim','XTick','XMinorTick'}) \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/tools/camircommit.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/camircommit.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,17 @@ +function camircommit() +% returns the global version of the camir project + +global globalvars; +curpath = pwd; + +% go to camir directory +cd (globalvars.camir.path); +tmp = textscan(evalc('!svnversion'),'%s'); + +% svn commit +svn commit; + +% return to current directory +cd (curpath); + +rev = tmp{1}; diff -r 000000000000 -r cc4b1211e677 core/tools/camirversion.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/camirversion.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,20 @@ +function [rev, latest] = camirversion() +% returns the global version of the camir project + +global globalvars; +curpath = pwd; + +% go to camir directory +cd (globalvars.camir.path); +tmp = textscan(evalc('!svnversion'),'%s'); + +% return to current directory +cd (curpath); + +rev = tmp{1}; +latest = textscan(rev{1},'%d','Delimiter',':'); +if ~isempty(latest{1}) + latest = double(latest{end}(end)); +else + latest='-1'; +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/tools/cell2csv.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/cell2csv.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,85 @@ +function cell2csv(fileName, cellArray, separator, excelYear, decimal) +% Writes cell array content into a *.csv file. +% +% CELL2CSV(fileName, cellArray, separator, excelYear, decimal) +% +% fileName = Name of the file to save. [ i.e. 'text.csv' ] +% cellArray = Name of the Cell Array where the data is in +% separator = sign separating the values (default = ';') +% excelYear = depending on the Excel version, the cells are put into +% quotes before they are written to the file. The separator +% is set to semicolon (;) +% decimal = defines the decimal separator (default = '.') +% +% by Sylvain Fiedler, KA, 2004 +% updated by Sylvain Fiedler, Metz, 06 +% fixed the logical-bug, Kaiserslautern, 06/2008, S.Fiedler +% added the choice of decimal separator, 11/2010, S.Fiedler + +%% Checking für optional Variables +if ~exist('separator', 'var') + separator = ','; +end + +if ~exist('excelYear', 'var') + excelYear = 1997; +end + +if ~exist('decimal', 'var') + decimal = '.'; +end + +%% Setting separator for newer excelYears +if excelYear > 2000 + separator = ';'; +end + +%% Write file +datei = fopen(fileName, 'w'); + +for z=1:size(cellArray, 1) + for s=1:size(cellArray, 2) + + var = eval(['cellArray{z,s}']); + % If zero, then empty cell + if size(var, 1) == 0 + var = ''; + end + % If numeric -> String + if isnumeric(var) + var = num2str(var); + % Conversion of decimal separator (4 Europe & South America) + % http://commons.wikimedia.org/wiki/File:DecimalSeparator.svg + if decimal ~= '.' 
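+            % e.g. with decimal = ',' the string '3.14' becomes '3,14'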
+ var = strrep(var, '.', decimal); + end + end + % If logical -> 'true' or 'false' + if islogical(var) + if var == 1 + var = 'TRUE'; + else + var = 'FALSE'; + end + end + % If newer version of Excel -> Quotes 4 Strings + if excelYear > 2000 + var = ['"' var '"']; + end + + % OUTPUT value + fprintf(datei, '%s', var); + + % OUTPUT separator + if s ~= size(cellArray, 2) + fprintf(datei, separator); + end + end + if z ~= size(cellArray, 1) % prevent a empty line at EOF + % OUTPUT newline + fprintf(datei, '\n'); + end +end +% Closing file +fclose(datei); +% END \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/tools/cprint.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/cprint.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,19 @@ +function cprint(l, varargin) +% --- +% function cprint(l, varargin) +% +% sprintfs statement varargin if verbosity level +% globalvars.debug >= l +% +% lvl 1: error / very important info +% lvl 2 : warning, mid-level runtime info +% lvl 3 : high-frequency runtime / debug info +% +% --- + + global globalvars; + + if isfield(globalvars, 'debug') && globalvars.debug >= l + display(sprintf(varargin{:})); + end +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/tools/csv2cell.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/csv2cell.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,152 @@ +function data = csv2cell(varargin) +% CSV2CELL - parses a Windows CSV file into an NxM cell array, where N is +% the number of lines in the CSV text and M is the number of fields in the +% longest line of the CSV file. Lines are delimited by carriage returns +% and/or newlines. +% +% A Windows CSV file format allows for commas (,) and double quotes (") to +% be contained within fields of the CSV file. Regular fields are just text +% separated by commas (e.g. foo,bar,hello world). Fields containing commas +% or double quotes are surrounded by double quotes (e.g. +% foo,bar,"item1,item2,item3",hello world). In the previous example, +% "item1,item2,item3" is one field in the CSV text. For double quotes to be +% represented, they are written in pairs in the file, and contained within +% a quoted field, (e.g. foo,"this field contains ""quotes""",bar). Spaces +% within fields (even leading and trailing) are preserved. +% +% All fields from the CSV file are returned as strings. If the CSV text +% contains lines with different numbers of fields, then the "missing" +% fields with appear as empty arrays, [], in the returned data. You can +% easily convert the data you expect to be numeric using str2num() and +% num2cell(). +% +% Examples: +% >> csv2cell('foo.csv','fromfile') % loads and parses entire file +% >> csv2cell(',,,') % returns cell array {'','','',''} +% >> csv2cell(',,,','text') % same as above, declaring text input +% >> csv2cell(sprintf('%s\r\n%s',... +% '"Ten Thousand",10000,,"10,000","""It''s ""10 Grand"", baby",10k',... +% ',foo,bar,soo')) +% ans = +% 'Ten Thousand' '10000' '' '10,000' [1x22 char] '10k' +% '' 'foo' 'bar' 'soo' [] [] +% >> % note the two empty [] cells, because the second line has two fewer +% >> % fields than the first. The empty field '' at the beginning of the +% >> % second line is due to the leading comma on that line, which is +% >> % correct behavior. A trailing comma will do the same to the end of a +% >> % line. +% +% Limitations/Exceptions: +% * This code is untested on large files. It may take a long time due to +% variables growing inside loops (yes, poor practice, but easy coding). 
+% * This code has been minimally tested to work with a variety of weird +% Excel files that I have. +% * Behavior with improperly formatted CSV files is untested. +% * Technically, CSV files from Excel always separate lines with the pair +% of characters \r\n. This parser will also separate lines that have only +% \r or \n as line terminators. +% * Line separation is the first operation. I don't think the Excel CSV +% format has any allowance for newlines or carriage returns within +% fields. If it does, then this parser does not support it and would not +% return bad output. +% +% Copyright 2008 Arthur Hebert + +% Process arguments +if nargin == 1 + text = varargin{1}; +elseif nargin == 2 + switch varargin{2} + case 'fromfile' + filename = varargin{1}; + fid = fopen(filename); + text = char(fread(fid))'; + fclose(fid); + case 'text' + text = varargin{1}; + otherwise + error('Invalid 2nd argument %s. Valid options are ''fromfile'' and ''text''',varargin{2}) + end +else + error('CSV2CELL requires 1 or 2 arguments.') +end + + +% First split it into lines +lines = regexp(text,'(\r\n|[\r\n])','split'); % lines should now be a cell array of text split by newlines + +% a character is either a delimiter or a field +inField = true; +inQuoteField = false; +% if inField && ~inQuoteField --> then we're in a raw field + +skipNext = false; +data = {}; +field = ''; +for lineNumber = 1:length(lines) + nChars = length(lines{lineNumber}); % number of characters in this line + fieldNumber = 1; + for charNumber = 1:nChars + if skipNext + skipNext = false; + continue + end + thisChar = lines{lineNumber}(charNumber); + if thisChar == ',' + if inField + if inQuoteField % this comma is part of the field + field(end+1) = thisChar; + else % this comma is the delimiter marking the end of the field + data{lineNumber,fieldNumber} = field; + field = ''; + fieldNumber = fieldNumber + 1; + end + else % we are not currently in a field -- this is the start of a new delimiter + inField = true; + end + if charNumber == nChars % this is a hanging comma, indicating the last field is blank + data{lineNumber,fieldNumber} = ''; + field = ''; + fieldNumber = fieldNumber + 1; + end + elseif thisChar == '"' + if inField + if inQuoteField + if charNumber == nChars % it's the last character, so this must be the closing delimiter? + inField = false; + inQuoteField = false; + data{lineNumber,fieldNumber} = field; + field = ''; + fieldNumber = fieldNumber + 1; + else + if lines{lineNumber}(charNumber+1) == '"' % this is translated to be a double quote in the field + field(end+1) = '"'; + skipNext = true; + else % this " is the delimiter ending this field + data{lineNumber,fieldNumber} = field; + field = ''; + inField = false; + inQuoteField = false; + fieldNumber = fieldNumber + 1; + end + end + else % this is a delimiter and we are in a new quote field + inQuoteField = true; + end + else % we are not in a field. This must be an opening quote for the first field? 
+ inField = true; + inQuoteField = true; + end + else % any other character ought to be added to field + field(end+1) = thisChar; + if charNumber == nChars + data{lineNumber,fieldNumber} = field; + field = ''; + fieldNumber = fieldNumber + 1; + elseif charNumber == 1 % we are starting a new raw field + inField = true; + end + end + end +end + \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/tools/hash.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/hash.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,79 @@ +function h = hash(inp,meth) +% HASH - Convert an input variable into a message digest using any of +% several common hash algorithms +% +% USAGE: h = hash(inp,'meth') +% +% inp = input variable, of any of the following classes: +% char, uint8, logical, double, single, int8, uint8, +% int16, uint16, int32, uint32, int64, uint64 +% h = hash digest output, in hexadecimal notation +% meth = hash algorithm, which is one of the following: +% MD2, MD5, SHA-1, SHA-256, SHA-384, or SHA-512 +% +% NOTES: (1) If the input is a string or uint8 variable, it is hashed +% as usual for a byte stream. Other classes are converted into +% their byte-stream values. In other words, the hash of the +% following will be identical: +% 'abc' +% uint8('abc') +% char([97 98 99]) +% The hash of the follwing will be different from the above, +% because class "double" uses eight byte elements: +% double('abc') +% [97 98 99] +% You can avoid this issue by making sure that your inputs +% are strings or uint8 arrays. +% (2) The name of the hash algorithm may be specified in lowercase +% and/or without the hyphen, if desired. For example, +% h=hash('my text to hash','sha256'); +% (3) Carefully tested, but no warranty. Use at your own risk. +% (4) Michael Kleder, Nov 2005 +% +% EXAMPLE: +% +% algs={'MD2','MD5','SHA-1','SHA-256','SHA-384','SHA-512'}; +% for n=1:6 +% h=hash('my sample text',algs{n}); +% disp([algs{n} ' (' num2str(length(h)*4) ' bits):']) +% disp(h) +% end + +inp=inp(:); +% convert strings and logicals into uint8 format +if ischar(inp) || islogical(inp) + inp=uint8(inp); +else % convert everything else into uint8 format without loss of data + inp=typecast(inp,'uint8'); +end + +% verify hash method, with some syntactical forgiveness: +meth=upper(meth); +switch meth + case 'SHA1' + meth='SHA-1'; + case 'SHA256' + meth='SHA-256'; + case 'SHA384' + meth='SHA-384'; + case 'SHA512' + meth='SHA-512'; + otherwise +end +algs={'MD2','MD5','SHA-1','SHA-256','SHA-384','SHA-512'}; +if isempty(strmatch(meth,algs,'exact')) + error(['Hash algorithm must be ' ... 
+ 'MD2, MD5, SHA-1, SHA-256, SHA-384, or SHA-512']); +end + +% create hash +x=java.security.MessageDigest.getInstance(meth); +x.update(inp); +h=typecast(x.digest,'uint8'); +h=dec2hex(h)'; +if(size(h,1))==1 % remote possibility: all hash bytes < 128, so pad: + h=[repmat('0',[1 size(h,2)]);h]; +end +h=lower(h(:)'); +clear x +return \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/tools/implode.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/implode.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,15 @@ +function out = implode(divider, input); +% out = implode(divider, input) + +out = []; +for i = 1:numel(input) + + % only put divider inbetween data + if i > 1 + + out = sprintf('%s%s%d', out, divider, input(i)); + else + + out = sprintf('%d', input(i)); + end +end diff -r 000000000000 -r cc4b1211e677 core/tools/kldiv.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/kldiv.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,101 @@ +function KL = kldiv(varValue,pVect1,pVect2,varargin) +%KLDIV Kullback-Leibler or Jensen-Shannon divergence between two distributions. +% KLDIV(X,P1,P2) returns the Kullback-Leibler divergence between two +% distributions specified over the M variable values in vector X. P1 is a +% length-M vector of probabilities representing distribution 1, and P2 is a +% length-M vector of probabilities representing distribution 2. Thus, the +% probability of value X(i) is P1(i) for distribution 1 and P2(i) for +% distribution 2. The Kullback-Leibler divergence is given by: +% +% KL(P1(x),P2(x)) = sum[P1(x).log(P1(x)/P2(x))] +% +% If X contains duplicate values, there will be an warning message, and these +% values will be treated as distinct values. (I.e., the actual values do +% not enter into the computation, but the probabilities for the two +% duplicate values will be considered as probabilities corresponding to +% two unique values.) The elements of probability vectors P1 and P2 must +% each sum to 1 +/- .00001. +% +% A "log of zero" warning will be thrown for zero-valued probabilities. +% Handle this however you wish. Adding 'eps' or some other small value +% to all probabilities seems reasonable. (Renormalize if necessary.) +% +% KLDIV(X,P1,P2,'sym') returns a symmetric variant of the Kullback-Leibler +% divergence, given by [KL(P1,P2)+KL(P2,P1)]/2. See Johnson and Sinanovic +% (2001). +% +% KLDIV(X,P1,P2,'js') returns the Jensen-Shannon divergence, given by +% [KL(P1,Q)+KL(P2,Q)]/2, where Q = (P1+P2)/2. See the Wikipedia article +% for "Kullback–Leibler divergence". This is equal to 1/2 the so-called +% "Jeffrey divergence." See Rubner et al. (2000). +% +% EXAMPLE: Let the event set and probability sets be as follow: +% X = [1 2 3 3 4]'; +% P1 = ones(5,1)/5; +% P2 = [0 0 .5 .2 .3]' + eps; +% +% Note that the event set here has duplicate values (two 3's). These +% will be treated as DISTINCT events by KLDIV. If you want these to +% be treated as the SAME event, you will need to collapse their +% probabilities together before running KLDIV. One way to do this +% is to use UNIQUE to find the set of unique events, and then +% iterate over that set, summing probabilities for each instance of +% each unique event. Here, we just leave the duplicate values to be +% treated independently (the default): +% KL = kldiv(X,P1,P2); +% KL = +% 19.4899 +% +% Note also that we avoided the log-of-zero warning by adding 'eps' +% to all probability values in P2. We didn't need to renormalize +% because we're still within the sum-to-one tolerance. 
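+%
+%      The symmetric and Jensen-Shannon variants take the same inputs,
+%      so with X, P1 and P2 as above one could equally call:
+%      KLsym = kldiv(X,P1,P2,'sym');
+%      JS = kldiv(X,P1,P2,'js');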
+% +% REFERENCES: +% 1) Cover, T.M. and J.A. Thomas. "Elements of Information Theory," Wiley, +% 1991. +% 2) Johnson, D.H. and S. Sinanovic. "Symmetrizing the Kullback-Leibler +% distance." IEEE Transactions on Information Theory (Submitted). +% 3) Rubner, Y., Tomasi, C., and Guibas, L. J., 2000. "The Earth Mover's +% distance as a metric for image retrieval." International Journal of +% Computer Vision, 40(2): 99-121. +% 4) Kullback–Leibler divergence. Wikipedia, The Free Encyclopedia. +% +% See also: MUTUALINFO, ENTROPY + +if ~isequal(unique(varValue),sort(varValue)), + warning('KLDIV:duplicates','X contains duplicate values. Treated as distinct values.') +end +if ~isequal(size(varValue),size(pVect1)) || ~isequal(size(varValue),size(pVect2)), + error('All inputs must have same dimension.') +end +% Check probabilities sum to 1: +if (abs(sum(pVect1) - 1) > .00001) || (abs(sum(pVect2) - 1) > .00001), + error('Probablities don''t sum to 1.') +end + +if ~isempty(varargin), + switch varargin{1}, + case 'js', + logQvect = log2((pVect2+pVect1)/2); + KL = .5 * (sum(pVect1.*(log2(pVect1)-logQvect)) + ... + sum(pVect2.*(log2(pVect2)-logQvect))); + + case 'sym', + KL1 = sum(pVect1 .* (log2(pVect1)-log2(pVect2))); + KL2 = sum(pVect2 .* (log2(pVect2)-log2(pVect1))); + KL = (KL1+KL2)/2; + + otherwise + error(['Last argument' ' "' varargin{1} '" ' 'not recognized.']) + end +else + KL = sum(pVect1 .* (log2(pVect1)-log2(pVect2))); +end + + + + + + + + diff -r 000000000000 -r cc4b1211e677 core/tools/license.txt --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/license.txt Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,24 @@ +Copyright (c) 2004-2010, Sylvain Fiedler +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the distribution + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
diff -r 000000000000 -r cc4b1211e677 core/tools/machine_learning/cut_Y.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/machine_learning/cut_Y.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,20 @@ +function r = cut_Y(r, valid_idx) +% disables the invalid parts of the ranking structure Y + + % get indices not mentioned + u = find(~valid_idx); + + % clear bad data + for i = 1 : numel(u) + + r{u(i), 1} = []; + r{u(i), 2} = []; + end + + % also clear weights if applicable + if size(r,2) == 3 + for i = 1 : numel(u) + r{u(i), 3} = 0; + end + end +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/tools/machine_learning/cvpartition_alltrain.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/machine_learning/cvpartition_alltrain.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,50 @@ +% --- +% fake partitioning for comparison +% to stober08: +% --- +classdef cvpartition_alltrain + +properties (Hidden) + + mtest; + mtraining; +end +properties + N; + NumTestSets; + TrainSize; + TestSize; +end + + +methods + +% --- +% constuctor: directly calculates the truncated testset +% --- +function P = cvpartition_alltrain(nData, nRuns) + + P.NumTestSets = nRuns; + P.N = nData; + + % build training and test sets + for i = 1:P.NumTestSets + P.TrainSize(i) = nData; + P.TestSize(i) = nData; + P.mtraining{i} = ones(P.N, 1); + P.mtest{i} = ones(P.N, 1); + end +end + +function out = test(P, i) + + out = P.mtest{i}; +end + +function out = training(P, i) + + out = P.mtraining{i}; +end + +end +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/tools/machine_learning/cvpartition_trunctrain.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/machine_learning/cvpartition_trunctrain.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,73 @@ +% --- +% class cvpartition_trunctrain +% NOTE: this is a fake cvpartition double for +% using cvpartitions in truncated-training size experiments +% --- +classdef cvpartition_trunctrain + +properties (Hidden) + + mtest; + mtraining; +end +properties + N; + NumTestSets; + TrainSize; + TestSize; +end + + +methods + +% --- +% constuctor: directly calculates the truncated testset +% --- +function P = cvpartition_trunctrain(Pin, perctrain) + + P.N = Pin.N; + P.NumTestSets = Pin.NumTestSets; + + for i = 1:Pin.NumTestSets + + % copy testing data + P.TestSize(i) = Pin.TestSize(i); + P.mtest{i} = Pin.test(i); + + % calculate new training size + P.TrainSize(i) = ceil(perctrain * Pin.TrainSize(i)); + + % get actual training indices + idx = find(Pin.training(i)); + + % --- + % TODO: save the permutation in a global variable, + % tomake the same smaller set available + % for all further experiments. 
+ % moreover, it would be great if the smaller training sets + % are subsets of the bigger ones + % --- + tokeep = randperm(numel(idx)); + tokeep = tokeep(1:P.TrainSize(i)); + + % get indices to keep + idx = idx(tokeep); + + % build truncated training set + P.mtraining{i} = false(P.N, 1); + P.mtraining{i}(idx) = true; + end +end + +function out = test(P, i) + + out = P.mtest{i}; +end + +function out = training(P, i) + + out = P.mtraining{i}; +end + +end +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/tools/machine_learning/cvpartition_trunctrain_incsubsets.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/machine_learning/cvpartition_trunctrain_incsubsets.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,128 @@ +% --- +% class cvpartition_trunctrain +% NOTE: this is a fake cvpartition double for +% using cvpartitions in truncated-training size experiments +% +% differently from cvpartition_trunctrain, we take all the training sizes +% at once and generate training partitions where the smaller ones are subsets +% of the bigger ones +% --- +classdef cvpartition_trunctrain_incsubsets + +properties (Hidden) + + mtest; + mtraining; +end +properties + N; + NumTestSets; + TrainSize; + TestSize; +end + + +methods + +% --- +% constuctor: directly calculates the truncated testset +% --- +function P = cvpartition_trunctrain_incsubsets(Pin, perctrain) + + % --- + % NOTE: we use a different permutation for each cv-Buun (testset), + % as otherwise the very small training sets will have about the same + % data + % --- + if ~cvpartition_trunctrain_incsubsets.exists_permutation(Pin) + cvpartition_trunctrain_incsubsets.renew_permutation(Pin); + end + + P.N = Pin.N; + P.NumTestSets = Pin.NumTestSets; + + for i = 1:Pin.NumTestSets + + % copy testing data + P.TestSize(i) = Pin.TestSize(i); + P.mtest{i} = Pin.test(i); + + % calculate new training size + P.TrainSize(i) = ceil(perctrain * Pin.TrainSize(i)); + + % get actual training indices + idx = find(Pin.training(i)); + + % --- + % NOTE: the Test-Set-Specific permutation is applied + % we only extract as many indices as fit in Pin + % --- + permu = cvpartition_trunctrain_incsubsets.get_permutation(i,Pin.TrainSize(i)); + + % truncate the indices + idx = idx(permu(1:P.TrainSize(i))); + + % build truncated training set + P.mtraining{i} = false(P.N, 1); + P.mtraining{i}(idx) = true; + end +end +function out = test(P, i) + + out = P.mtest{i}; +end + +function out = training(P, i) + + out = P.mtraining{i}; +end +end + +methods (Static) + + % --- + % TODO: save the permutation in a global variable, + % tomake the same smaller set available + % for all further experiments. + % moreover, it would be great if the smaller training sets + % are subsets of the bigger ones + % --- + function renew_permutation(P) + global globalvars; + + if isfield(globalvars.camir, ... + 'cvpartition_trunctrain_incsubsets'); + warning 'renwewing permutations for train sets'; + end + + for i = 1:P.NumTestSets + globalvars.camir.cvpartition_trunctrain_incsubsets.permutation(i).data = ... + randperm(P.N); + end + end + + function idx = get_permutation(testId, trainSize) + % returns the permutation for specific test set + global globalvars; + + idx = globalvars.camir.cvpartition_trunctrain_incsubsets.permutation(testId).data; + + % cut the permutation to contain no exxcess numbers + idx = idx(idx <= trainSize); + end + + function out = exists_permutation(P) + global globalvars; + if isfield(globalvars.camir, ... 
+ 'cvpartition_trunctrain_incsubsets'); + + out = (numel(globalvars.camir.cvpartition_trunctrain_incsubsets.permutation) == P.NumTestSets) ... + && (numel(globalvars.camir.cvpartition_trunctrain_incsubsets.permutation(1).data) == P.N); + + + else out = false; + end + end + +end +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/tools/machine_learning/display_mahalanobis_metric.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/machine_learning/display_mahalanobis_metric.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,66 @@ +function display_mahalanobis_metric(A, labels) +% display a mala matrix and its stats + +if nargin < 2 + labels = num2cell(1:size(A,1)); + +elseif ~iscell(labels) + + features = labels; + labels = features.labels; +end + + + +figure; + +% plot matrix +imagesc(A); +axis xy; + +% set labels +set(gca,'YTick', 1:numel(labels), ... + 'YTickLabel', labels); +set(gca,'XTick',1:numel(labels), ... + 'XTickLabel', labels); + +% --- +% approximate parameter weights: +% diagonal and sum(abs(row)) +% TODO: make nshow dependend on percentile +% --- + +nshow = min(numel(labels), 50); +figure; + +% get diagonal values of the Matrix +diagw = abs(diag(A)); + +% --- +% weight with feature values if possible +% --- +if exist('features','var') + + diagw = diagw.* mean(features.vector(),2); +end + + +[diagw, idx] = sort(diagw, 'descend'); + +% normalise +alld = sum(diagw); + +% plot +bar(diagw(1:nshow)./ alld); +set(gca,'XTick',1:nshow, ... + 'XTickLabel', labels(idx(1:nshow))); + +ylabel ('relevance factor'); + +if exist('features','var') + xlabel 'normalised weight' +else + xlabel 'matrix factors' +end + +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/tools/machine_learning/get_fo_deltas.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/machine_learning/get_fo_deltas.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,69 @@ +function [deltas, idx, weights] = get_fo_deltas(r, X, rectify) +% [deltas, idx, (weights)] = get_fo_deltas(r, X) +% +% returns the difference vectors (dist(a,c) > dist(a,b)) +% for a given ranking: +% deltas{i}(:,1) = a - b, deltas{i}(:,2) = a - c; +% +% set rectify to output absolute values of training examples + +if nargin < 3 + rectify = 0; +end + +% --- +% NOTE: this preallocation is not complete +% --- +deltas = {}; +idx = {}; +weights = []; +for i = 1:size(r,1) + + % feature index + a = i; + + % check if ranking is valid + if ~isempty(r{i,1}) && ~isempty(r{i,2})&& ... + isempty(intersect(r{i,1}, r{i,2})); + + % --- + % NOTE / TODO: the follwing is intended for compability + % both sides of the ranking may have more than one entry. 
+ % for the MTT database, the ranking may be correct, but the + % inequalities build from non-singular rankings are not + % based on the actual data + % --- + for j = 1:numel(r{i,1}) + b = r{i,1}(j); + + for k = 1:numel(r{i,2}) + c = r{i,2}(k); + + % --- + % get vector deltas, + % NOTE: first: dissimilar, then similar pair + % --- + [dac] = X(:,a) - X(:,c); + [dab] = X(:,a) - X(:,b); + + if ~rectify + deltas{end+1} = [dac dab]; + else + % --- + % rectify the data for training + % --- + deltas{end+1} = abs([dac dab]); + end + + idx{end+1} = {[a c],[a b]}; + + % save weights + if size(r,2) == 3 + weights(end+1) = r{i,3}(1); + end + end + end + end +end +end + diff -r 000000000000 -r cc4b1211e677 core/tools/machine_learning/get_itml_deltas.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/machine_learning/get_itml_deltas.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,85 @@ +function [X, C, idx] = get_itml_deltas(r, in) +% [X, C, idx] = get_itml_deltas(r, in) + +%ITML Specs +% C: 4 column matrix +% column 1, 2: index of constrained points. Indexes between 1 and n +% column 3: 1 if points are similar, -1 if dissimilar +% column 4: right-hand side (lower or upper bound, depending on +% whether points are similar or dissimilar) +% +% X: (n x m) data matrix - each row corresponds to a single instance +% --- +% NOTE: X is thus input in transposed shape for the ITML algorithm +% --- + +% --- +% NOTE: this preallocation is not complete +% --- +X = zeros(size(in,1), 0); +C = zeros(0,4); +idx = zeros(0,2); + +for i = 1:size(r,1) + + % feature indexing + a = i; + + % check if ranking is valid + if ~isempty(r{i,1}) && ~isempty(r{i,2})&& ... + isempty(intersect(r{i,1}, r{i,2})); + + % --- + % NOTE / TODO: the follwing is intended for compability + % both sides of the ranking may have more than one entry. + % for the MTT database, the ranking may be correct, but the + % inequalities build from non-singular rankings are not + % based on the actual data + % --- + for j = 1:numel(r{i,1}) + b = r{i,1}(j); + + for k = 1:numel(r{i,2}) + c = r{i,2}(k); + + % --- + % get vector deltas + % --- + [dab] = in(:,a) - in(:,b); + [dac] = in(:,a) - in(:,c); + + % --- + % save deltas in new feature matrix + % TODO: this method has duplicate entries + % if the pairs appear more than once + % index the data set and use more efficiently!!! + % --- + X = [X dab]; + idx(end+1,:) = [a b]; + iab = size(idx, 1); + + X = [X dac]; + idx(end+1,:) = [a c]; + iac = size(idx, 1); + + % --- + % NOTE: + % in terms of the constraint, + % this should mean: dac - dab >= 1 + % + % 4th position cannot be 0, converges to Inf if > 1 + % -1,-1 learns the opposite of what constraitns say + % --- + C(end+1, :) = [iab iac -1 -1]; + end + end + end +end + +% % --- +% % NOTE: here, we transpose the X for usage i nthe training +% % --- +% X = X'; + +end + diff -r 000000000000 -r cc4b1211e677 core/tools/machine_learning/get_svmlight_inequalities_from_ranking.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/machine_learning/get_svmlight_inequalities_from_ranking.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,120 @@ +function [lhs, rhs, factors, invalid] = get_svmlight_inequalities_from_ranking(r, X) +% [lhs, rhs] = get_svmlight_inequalities_from_ranking(r, features) +% +% prepares ranking data to be used with the svmlight implementation +% of Schultz & Joachims 2003 + +% --- +% CAVE: this file still allows arbitrary rankings +% this could be good but also introducing +% confusion in the future. 
Especially the +% weightings are only defined per query +% --- + + +% --- +% normalise weightings +% NOTE: not necessary +% --- +% if (size(r,2) > 2) +% max_weight = max(cell2mat(r(:,3))); +% end + +% --- +% fix right hand side to one +% (after Schultz & Joachims 2003) +% --- +fix_rhs = 1; +fix_weight = 1; + +% --- +% NOTE: this preallocation is not complete +% --- +lhs = cell(0,2); +factors = []; +invalid = []; +for i = 1:size(r,1) + + % feature index + a = i; + + % check if ranking is valid + if ~isempty(r{i,1}) && ~isempty(r{i,2})&& ... + isempty(intersect(r{i,1}, r{i,2})); + + % --- + % NOTE / TODO: the follwing is intended for compability + % both sides of the ranking may have more than one entry. + % for the MTT database, the ranking may be correct, but the + % inequalities build from non-singular rankings are not + % based on the actual data + % --- + for j = 1:numel(r{i,1}) + b = r{i,1}(j); + + for k = 1:numel(r{i,2}) + c = r{i,2}(k); + + % --- + % get vector deltas + % --- + [dab] = get_delta(X(:,a), X(:,b)); + [dac] = get_delta(X(:,a), X(:,c)); + + % get the delta difference vector + ddiff = dac - dab; + + % --- + % save the non-empty differences row by row + % NOTE: it is not clear whether the indexing for + % \omega starts a 0 or 1. + % ---- + xgzero = find( ddiff ~= 0); + + if ~isempty(xgzero) + lhs = cat(1,lhs,{xgzero, ddiff(xgzero)}); + else + invalid = [invalid i]; + end + + % save factors + if (nargout > 2) + if (size(r,2) > 2) + factors = [factors, r{i,3}]; % / max_weight + else + factors = [factors, fix_weight]; + end + end + + end + end + end +end + +% --- +% determine right hand side +% --- +rhs = ones(size(lhs,1), 1) .* fix_rhs; +cprint(2, 'SVMLight data: %d invalid rankings excluded from training set', numel(invalid)); +end + + +function out = get_delta(a, b, A) +% returns the pointwise (transformed) feature vector differences + +if nargin == 2 + + % --- + % return squared factors as in euclidean distance + % --- + out = (a - b).^2; + +else + + % transform vectors before accessing difference + out = (A' * a - A' * b).^2; +end + +end + + diff -r 000000000000 -r cc4b1211e677 core/tools/machine_learning/mlr_repeat_YX_by_rating.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/machine_learning/mlr_repeat_YX_by_rating.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,32 @@ +function [Yout, Xout] = mlr_repeat_YX_by_rating(Y, X, weights) +% [Yout, Xout] = mlr_repeat_YX_by_rating(Y, X, weights) +% +% repeats the data in X and Y according to the weights specified +% in weights or Y(:,3) + +cprint(2, 'Repeating data for weighted learning'); +if nargin == 2 + weights = cell2mat(Y(:,3)); +end + +Yout = Y(:,1:2); + +valid = ~(cellfun(@isempty, Y(:,1)) | cellfun(@isempty, Y(:,2))); + +for i = 1:size(Y, 1) + if valid(i) + for j = 2:weights(i) + Yout(end + 1, :) = Y(i, 1:2); + end + end +end + +Xout = zeros(size(X,1), size(Y,1)); +Xout(:,1:size(X,2)) = X; +for i = 1:size(Y, 1) + if valid(i) + for j = 2:weights(i) + Xout(:,end+1) = X(:,i); + end + end +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/tools/machine_learning/save_svmlight_inequalities.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/machine_learning/save_svmlight_inequalities.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,57 @@ +function success = save_svmlight_inequalities(lhs, rhs, factors, file) +% success = save_svmlight_inequalities(lhs, rhs, file) +% +% saves the optimisation problem given by lhs and rhs to +% a svmlight data file. 
the individual equations can +% be weighted using +% +% success = save_svmlight_inequalities(lhs, rhs, factors, file) + +if nargin == 3 + file = factors; + factors = []; +end + +% open file +fid = fopen(file, 'w+'); +if fid < 1 + success = 0; + return; +end + +try + % write individual constraint rows + for i = 1:size(lhs,1) + + % --- + % print rows:" rhs #fnum:#fval #fnum:#fval #fnum:#fval ..." + % --- + + % print right hand side + fprintf(fid,'%d ', rhs(i)); + + % print cost factors if availablefactor + if (numel(lhs{i,1}) > 0) && (numel(factors) >= i) + + fprintf(fid,'cost:%f ', factors(i)); + end + + % print left hand side + for j = 1:numel(lhs{i,1}) + + fprintf(fid,'%d:%2.16f ', lhs{i,1}(j), lhs{i,2}(j)); + end + + % finish line + fprintf(fid,'\n'); + end +catch + success = 0; + fclose(fid); + return; +end +success = 1; +fclose(fid); + + + \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/tools/machine_learning/scale_ratings.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/machine_learning/scale_ratings.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,16 @@ +function Y = scale_ratings(Y, max_weight) +% [Yout, Xout] = scale_ratings(Y, max_weight) + + +% get maximal weight +weights = cell2mat(Y(:,3)); +max_dataweight = max(weights); + +valid = ~(cellfun(@isempty, Y(:,1)) | cellfun(@isempty, Y(:,2))); + +% scale weights to a maximal value of max_weight +for i = 1:size(Y, 1) + if valid(i) && Y{i,3} > 0 + Y{i,3} = min(max_weight, max(1, round((Y{i,3} / max_dataweight) * max_weight))); + end +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/tools/machine_learning/sim_get_traintest_clip_overlap.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/machine_learning/sim_get_traintest_clip_overlap.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,37 @@ +function [relative, absolute, test_cn, train_cn] = sim_get_traintest_clip_overlap(datafile) +% +% get_traintest_clip_overlap(datafile) +% +% returns the percentage of overlapping constraints +% with the corresponding test set for each training set +% +% how many percent of the test set are reappearing in the training set + + +% simdata = load(datafile); +if nargin < 1 + simdata = load('comp_partBinData_unclustered_cupaper_01'); +else + simdata = load(datafile); +end +nTestSets = size(simdata.partBinTst, 2); % num cv bins +ntrainsizes = size(simdata.partBinTrn, 2); % num increases of training + + +absolute = zeros(nTestSets, ntrainsizes); +relative = zeros(nTestSets, ntrainsizes); +for k = 1:nTestSets + + test_clips = unique([simdata.partBinTst{k}(:,1); simdata.partBinTst{k}(:,2); simdata.partBinTst{k}(:,3)]); + test_cn(k) = numel(test_clips); + for m = 1:ntrainsizes + + train_clips = unique([simdata.partBinTrn{k,m}(:,1); simdata.partBinTrn{k,m}(:,2); simdata.partBinTrn{k,m}(:,3)]); + + same = intersect(train_clips, test_clips); + + absolute(k,m) = numel(same); + relative(k,m) = absolute(k,m) / numel(test_clips); + end + train_cn(k) = numel(train_clips); +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/tools/machine_learning/svmlight2weight.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/machine_learning/svmlight2weight.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,23 @@ +function w = svmlight2weight(modelfile) +% returns the dual weight vecor for a given svm model +% +% All the script does is compute the weighted sum of the support vectors +% (first element in line is alpha*y, what follows is the feature vector). 
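+%
+% A minimal usage sketch (assuming a trained SVMlight model file named
+% 'svm_model' in the current directory):
+%   w = svmlight2weight('svm_model');
+%   [~, order] = sort(abs(w), 'descend');   % feature dimensions by weight
+%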
+% For further info, see the comment in the model file for its format. + +tagstr = python('svmlight2weight.py', modelfile); + +[firstidx] = strfind(tagstr, '1 :'); + +%get start of vector description +tagstr = tagstr(firstidx:end); + +% parse text function output +vecs = textscan(tagstr,'%d %f','Delimiter',':'); + +% initialise +w = zeros(max(vecs{1}),1); + +% set values +w(vecs{1}) = vecs{2}; + diff -r 000000000000 -r cc4b1211e677 core/tools/machine_learning/svmlight2weight.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/machine_learning/svmlight2weight.py Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,93 @@ +# Compute the weight vector of linear SVM based on the model file +# Original Perl Author: Thorsten Joachims (thorsten@joachims.org) +# Python Version: Ori Cohen (orioric@gmail.com) +# Call: python svm2weights.py svm_model + +import sys +from operator import itemgetter + +try: + import psyco + psyco.full() +except ImportError: + print 'Psyco not installed, the program will just run slower' + +def sortbyvalue(d,reverse=True): + ''' proposed in PEP 265, using the itemgetter this function sorts a dictionary''' + return sorted(d.iteritems(), key=itemgetter(1), reverse=True) + +def sortbykey(d,reverse=True): + ''' proposed in PEP 265, using the itemgetter this function sorts a dictionary''' + return sorted(d.iteritems(), key=itemgetter(0), reverse=False) + +def get_file(): + """ + Tries to extract a filename from the command line. If none is present, it + assumes file to be svm_model (default svmLight output). If the file + exists, it returns it, otherwise it prints an error message and ends + execution. + """ + # Get the name of the data file and load it into + if len(sys.argv) < 2: + # assume file to be svm_model (default svmLight output) + print "Assuming file as svm_model" + filename = 'svm_model' + #filename = sys.stdin.readline().strip() + else: + filename = sys.argv[1] + + + try: + f = open(filename, "r") + except IOError: + print "Error: The file '%s' was not found on this system." % filename + sys.exit(0) + + return f + + + + +if __name__ == "__main__": + f = get_file() + i=0 + lines = f.readlines() + printOutput = True + w = {} + for line in lines: + if i>10: + features = line[:line.find('#')-1] + comments = line[line.find('#'):] + alpha = features[:features.find(' ')] + feat = features[features.find(' ')+1:] + for p in feat.split(' '): # Changed the code here. 
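+                # first pass over the feature string: make sure every
+                # feature id in this support vector has an entry in w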
+ a,v = p.split(':') + if not (int(a) in w): + w[int(a)] = 0 + for p in feat.split(' '): + a,v = p.split(':') + w[int(a)] +=float(alpha)*float(v) + elif i==1: + if line.find('0')==-1: + print 'Not linear Kernel!\n' + printOutput = False + break + elif i==10: + if line.find('threshold b')==-1: + print "Parsing error!\n" + printOutput = False + break + + i+=1 + f.close() + + #if you need to sort the features by value and not by feature ID then use this line intead: + #ws = sortbyvalue(w) + + ws = sortbykey(w) + if printOutput == True: + for (i,j) in ws: + print i,':',j + i+=1 + + diff -r 000000000000 -r cc4b1211e677 core/tools/machine_learning/weighted_kmeans.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/machine_learning/weighted_kmeans.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,163 @@ +function [centres, cweights, post, errlog, options] = weighted_kmeans(centres, data, weights, options) +%[centres, cweights, post, errlog, options] = weighted_kmeans(centres,data, weights, options) +% +% weighted_kmeans Trains a k means cluster model on weighted input vectors +% +% Adapted from the Netlab Toolbox by Daniel Wolff, +% This function takes a WEIGHTS vector, containing weights for the +% different data points. This can be used for training with varying +% discretisation intervals. +% +% Description +% CENTRES = weighted_kmeans(NCENTRES, DATA, WEIGHTS, OPTIONS) or +% CENTRES = weighted_kmeans(CENTRES, DATA, WEIGHTS, OPTIONS) uses the batch K-means +% algorithm to set the centres of a cluster model. The matrix DATA +% represents the data which is being clustered, with each row +% corresponding to a vector. The sum of squares error function is used. +% The point at which a local minimum is achieved is returned as +% CENTRES. The error value at that point is returned in OPTIONS(8). +% +% +% POST and ERRLOG +% also return the cluster number (in a one-of-N encoding) for each +% data point in POST and a log of the error values after each cycle in +% ERRLOG. The optional parameters have the following +% interpretations. +% +% OPTIONS(1) is set to 1 to display error values; also logs error +% values in the return argument ERRLOG. If OPTIONS(1) is set to 0, then +% only warning messages are displayed. If OPTIONS(1) is -1, then +% nothing is displayed. +% +% OPTIONS(2) is a measure of the absolute precision required for the +% value of CENTRES at the solution. If the absolute difference between +% the values of CENTRES between two successive steps is less than +% OPTIONS(2), then this condition is satisfied. +% +% OPTIONS(3) is a measure of the precision required of the error +% function at the solution. If the absolute difference between the +% error functions between two successive steps is less than OPTIONS(3), +% then this condition is satisfied. Both this and the previous +% condition must be satisfied for termination. +% +% OPTIONS(14) is the maximum number of iterations; default 100. 
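+%
+% Example (hypothetical data; assumes Netlab's DIST2 is on the path):
+% data = rand(200, 2); % 200 two-dimensional points
+% weights = rand(1, 200); % one weight per data point
+% options = zeros(1, 14);
+% options(1) = -1; % run silently
+% options(14) = 50; % at most 50 iterations
+% [centres, cweights, post] = weighted_kmeans(3, data, weights, options);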
+% +% See also +% GMMINIT, GMMEM +% + +% Copyright (c) Ian T Nabney (1996-2001) + +[ndata, data_dim] = size(data); +[ncentres, dim] = size(centres); + +if dim ~= data_dim + if dim == 1 && ncentres == 1 && centres > 1 + + if ndata == numel(weights) + + % --- + % allow for number of centres specification + % --- + dim = data_dim; + ncentres = centres; + + options(5) = 1; + else + error('Data dimension does not match number of weights') + end + + else + error('Data dimension does not match dimension of centres') + end +end + +if (ncentres > ndata) + error('More centres than data') +end + +% Sort out the options +if (options(14)) + niters = options(14); +else + niters = 100; +end + +store = 0; +if (nargout > 3) + store = 1; + errlog = zeros(1, niters); +end + +% Check if centres and posteriors need to be initialised from data +if (options(5) == 1) + % Do the initialisation + perm = randperm(ndata); + perm = perm(1:ncentres); + + % Assign first ncentres (permuted) data points as centres + centres = data(perm, :); +end +% Matrix to make unit vectors easy to construct +id = eye(ncentres); + +% save accumulated weight for a center +cweights = zeros(ncentres, 1); + +% Main loop of algorithm +for n = 1:niters + + % Save old centres to check for termination + old_centres = centres; + + % Calculate posteriors based on existing centres + d2 = dist2(data, centres); + % Assign each point to nearest centre + [minvals, index] = min(d2', [], 1); + post = logical(id(index,:)); + + % num_points = sum(post, 1); + % Adjust the centres based on new posteriors + for j = 1:ncentres + if (sum(weights(post(:,j))) > 0) + % --- + % NOTE: this is edited to include the weights. + % Instead of summing the vectors directly, the vectors are weighted + % and then the result is divided by the sum of the weights instead + % of the number of vectors for this class + % --- + cweights(j) = sum(weights(post(:,j))); + + centres(j,:) = sum(diag(weights(post(:,j))) * data(post(:,j),:), 1)... + /cweights(j); + end + end + + % Error value is total squared distance from cluster centres + % edit: weighted by the vectors weight + e = sum(minvals .* weights); + if store + errlog(n) = e; + end + if options(1) > 0 + fprintf(1, 'Cycle %4d Error %11.6f\n', n, e); + end + + if n > 1 + % Test for termination + if max(max(abs(centres - old_centres))) < options(2) & ... + abs(old_e - e) < options(3) + options(8) = e; + return; + end + end + old_e = e; +end + +% If we get here, then we haven't terminated in the given number of +% iterations. +options(8) = e; +if (options(1) >= 0) + disp(maxitmess); +end + diff -r 000000000000 -r cc4b1211e677 core/tools/migrate_to_test_dir.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/migrate_to_test_dir.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,53 @@ +function out = migrate_to_test_dir(varargin) +% +% out = migrate_to_test_dir('testname', 'this is a test description') +% create test run directory, add current dir to path and move +% into test run dir +% +% NOTE: the test directory is created in "globalvars.testdatadir" if set +% + +global globalvars; + +[sTest,fixedtestdir, unused] = process_options(varargin, ... 
+ 'testname', '','fixedtestdir', ''); + +% use starting script name as description if none is given +if isempty(sTest) + [ST, I] = dbstack(); + sTest = ST(end).name; +end + + +% switch to test directory, and add this or current dir to path +if ~isempty(globalvars.tstoutputpath) && ~isempty(dir(globalvars.tstoutputpath)); + addpath(globalvars.tstoutputpath); + cd(globalvars.tstoutputpath); +else + addpath(pwd); +end + +% get camir version +[~,cv] = camirversion(); + +% get current date +sDate = datestr(now,'yymmdd'); + +if isempty(fixedtestdir) + newdir = sprintf('%s_%s_r%d',sDate,sTest,cv); +else + newdir = fixedtestdir; +end + +% create dir if not existent +if isempty(dir(newdir)); + mkdir(newdir); +end + +cd(newdir); +if ~isempty(strfind(pwd, newdir)) + out = newdir; +else + warning 'cannot migrate to specified test directory, Ill dump right here'; + out = -1; +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/tools/param_combinations.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/param_combinations.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,44 @@ +function combis = param_combinations(params, pos) +% given a param struct with multiple options for diverse +% parameters, param_combinations(params) returns all +% valid combinations of the param sets + +if nargin < 2 + pos = 1; +end + +% get available fields +fields = fieldnames(params); + +nparams = params; + if pos <= numel(fields) + + for j = 1:numel(params.(fields{pos})) + + % --- + % successively ralter the params struct, + % choosing one of the options + % --- + if ~iscell(params.(fields{pos})) + + nparams.(fields{pos}) = params.(fields{pos})(j); + else + + nparams.(fields{pos}) = params.(fields{pos}){j}; + end + + if j == 1 + combis = param_combinations(nparams, pos + 1); + + else + + % gather the resulting configurations, in reverse order + % regarding the recursion + combis = cat(1, param_combinations(nparams, pos + 1), combis); + end + end + else + % we have reached the leaves, containing single combinations + combis = nparams; + end +end diff -r 000000000000 -r cc4b1211e677 core/tools/print_error.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/print_error.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,17 @@ +function out = print_error(err) +% --- +% print error messages +% and following lines: stack +% --- +out.msg = sprintf('%s\n',err.message); +if nargout < 1 + fprintf(out.msg); +end + +for e = 1:length(err.stack) + out.line{e} = sprintf('%s at %i\n',err.stack(e).name, err.stack(e).line); + if nargout < 1 + fprintf(out.line{e}); + end +end + diff -r 000000000000 -r cc4b1211e677 core/tools/python.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/python.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,75 @@ +function [result status] = python(varargin) +%python Execute python command and return the result. +% python(pythonFILE) calls python script specified by the file pythonFILE +% using appropriate python executable. +% +% python(pythonFILE,ARG1,ARG2,...) passes the arguments ARG1,ARG2,... +% to the python script file pythonFILE, and calls it by using appropriate +% python executable. +% +% RESULT=python(...) outputs the result of attempted python call. If the +% exit status of python is not zero, an error will be returned. +% +% [RESULT,STATUS] = python(...) outputs the result of the python call, and +% also saves its exit status into variable STATUS. +% +% See also SYSTEM, JAVA, MEX. + +% Copyright 1990-2007 The MathWorks, Inc. 
+% $Revision: 1.1.4.8 $ + +cmdString = ''; + +% Add input to arguments to operating system command to be executed. +% (If an argument refers to a file on the MATLAB path, use full file path.) +for i = 1:nargin + thisArg = varargin{i}; + if isempty(thisArg) || ~ischar(thisArg) + error('MATLAB:perl:InputsMustBeStrings', 'All input arguments must be valid strings.'); + end + if i==1 + if exist(thisArg, 'file')==2 + % This is a valid file on the MATLAB path + if isempty(dir(thisArg)) + % Not complete file specification + % - file is not in current directory + % - OR filename specified without extension + % ==> get full file path + thisArg = which(thisArg); + end + else + % First input argument is pythonFile - it must be a valid file + error('MATLAB:perl:FileNotFound', 'Unable to find python file: %s', thisArg); + end + end + + % Wrap thisArg in double quotes if it contains spaces + if any(thisArg == ' ') + thisArg = ['"', thisArg, '"']; + end + + % Add argument to command string + cmdString = [cmdString, ' ', thisArg]; +end + +% Execute python script +errTxtNoPerl = 'Unable to find python executable.'; + +if isempty(cmdString) + error('MATLAB:perl:NoPerlCommand', 'No python command specified'); +elseif ispc + % PC + perlCmd = []; % fullfile('C:\Program Files (x86)\Python3x'); + cmdString = ['python' cmdString]; + perlCmd = ['set PATH=',perlCmd, ';%PATH%&' cmdString]; + [status, result] = dos(perlCmd); +else + error('MATLAB:python:NoExecutable', errTxtNoPerl); +end + +% Check for errors in shell command +if nargout < 2 && status~=0 + error('MATLAB:perl:ExecutionError', ... + 'System error: %sCommand executed: %s', result, cmdString); +end + diff -r 000000000000 -r cc4b1211e677 core/tools/reset_now.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/reset_now.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,11 @@ +% reset_now.m +global globalvars; +cd(globalvars.camir.path) +cd .. +clear; +clear global; +clear all; +clear global; +clear; + +startup; \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/tools/sqlescape.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/sqlescape.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,5 @@ +function out = sqlescape(in) + +out = strrep(in, '''', ''''''); + +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/tools/strcell2matrix.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/strcell2matrix.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,32 @@ +function [out] = strcell2matrix(in, start, finish) +% +% out = strcell2matrix(in,start) +% +% in: cell array containing string numbers (from csv2cell) +% start: start offset of first number [row, column] +% finish: end offset + +% e.g. 
annots = strcell2matrix(annotations_final,[1 0], [0 0]) + +if nargin == 1 + start = [0 0]; + finish = [0 0]; +end + +if nargin == 2 + finish = [0 0]; +end + + +% --- +% Get the data and ignore the rest +% --- +out = zeros(size(in) - start - finish); + + +for i = (1 + start(1)):size(in,1) - finish(1) + for j = (1 + start(1)):size(in,2) - finish(2) + + out(i - start(1), j - start(2)) = str2double(in{i,j}); + end +end diff -r 000000000000 -r cc4b1211e677 core/tools/strcellfind.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/strcellfind.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,30 @@ +function out = strcellfind(strdb, str, findAll) +% out = strcellfind(strdb, str) +% +% finds a string within an cell array of strings +% only outputs the first occurence, unless +% findAll is set to true +% +% strcellfind is NOT CASE sensitive + +if nargin < 3 + findAll = 0; +end + +out = []; +for i = 1:length(strdb) + if strcmpi( lower(char(strdb{i})), str) == 1; + if ~findAll + out = i; + return; + else + out(end+1) = i; + end + end +end +% --- +% NOTE: Bad backwards compability +% --- +% if isempty(out) +% out = -1; +% end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/tools/substrcellfind.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/substrcellfind.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,37 @@ +function [idx, strpos] = substrcellfind(strdb, str, findAll) +% [idx, strpos] = substrcellfind(strdb, str) +% +% finds a string within an cell array of strings +% only outputs the first occurence, unless +% findAll is set to true +% +% substrcellfind is NOT CASE sensitive + +if nargin < 3 + findAll = 0; +end + +idx = []; +strpos = []; +for i = 1:length(strdb) + + % search string in cell db + tpos = strfind(lower(char(strdb{i})), lower(str)); + + if ~isempty(tpos) + + strpos(end+1) = tpos; + + if ~findAll + + idx = i; + return; + else + idx(end+1) = i; + end + end +end + +% if isempty(idx) +% idx = -1; +% end diff -r 000000000000 -r cc4b1211e677 core/tools/uimage.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/uimage.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,132 @@ +function h = uimage(varargin) +%UIMAGE Display image with uneven axis. +% UIMAGE(X,Y,C) displays matrix C as an image, using the vectors X and +% Y to specify the X and Y coordinates. X and Y may be unevenly spaced +% vectors, but must be increasing. The size of C must be LENGTH(Y)* +% LENGTH(X). (Most probably you'll want to display C' instead of C). +% +% Contrary to Matlab's original IMAGE function, here the vectors X and Y +% do not need to be linearly spaced. Whereas IMAGE linearly interpolates +% the X-axis between X(1) and X(end), ignoring all other values (idem +% for Y), UIMAGE allows for X and/or Y to be unevenly spaced vectors, by +% locally stretching the matrix C (ie, by duplicating some elements of C) +% for larger X and/or Y intervals. +% +% The syntax for UIMAGE(X,Y,C,...) is the same as IMAGE(X,Y,C,...) +% (all the remaining arguments, eg 'PropertyName'-PropertyValue pairs, +% are passed to IMAGE). See IMAGE for details. +% +% Use UIMAGESC to scale the data using the full colormap. The syntax for +% UIMAGESC(X,Y,C,...) is the same as IMAGESC(X,Y,C,...). +% +% Typical uses: +% - Plotting a spatio-temporal diagram (T,X), with unevenly spaced +% time intervals for T (eg, when some values are missing, or when +% using a non-constant sampling rate). +% - Plotting a set of power spectra with frequency in log-scale. +% +% h = UIMAGE(X,Y,C,...) 
returns a handle to the image. +% +% Example: +% c = randn(50,20); % Random 50x20 matrix +% x = logspace(1,3,50); % log-spaced X-axis, between 10 and 1000 +% y = linspace(3,8,20); % lin-spaced Y-axis, between 3 and 8 +% uimagesc(x,y,c'); % displays the matrix +% +% F. Moisy +% Revision: 1.03, Date: 2006/06/14. +% +% See also IMAGE, IMAGESC, UIMAGESC. + + +% History: +% 2006/06/12: v1.00, first version. +% 2006/06/14: v1.03, minor bug fixed; works in ML6. + +error(nargchk(3,inf,nargin)); + +% maximum number of matrix elements to interpolate the uneven axis +% (typically between 500 and 5000): +nmax = 2000; + +x = varargin{1}; +y = varargin{2}; +c = varargin{3}; + +if any(diff(x)<=0) || any(diff(y)<=0) + error('The X and Y axis should be increasing.'); +end + +dx = min(diff(x)); % smallest interval for X +dy = min(diff(y)); % smallest interval for Y + +% test if X and Y are linearly spaced (to within 10^-12): +evenx = all(abs(diff(x)/dx-1)<1e-12); % true if X is linearly spaced +eveny = all(abs(diff(y)/dy-1)<1e-12); % true if Y is linearly spaced + + +if evenx && eveny % X and Y both evenly spaced + + xe = x; + ye = y; + ce = c; + +elseif evenx && ~eveny % X even and Y uneven + + nx = length(x); + xe = x; + + ny = ceil(1 + (y(end) - y(1))/dy); % number of points for Y + ny = min(ny, nmax); + ye = linspace(y(1), y(end), ny); + + ce = zeros(ny,nx); + + for j=1:ny + indj = find(y<=ye(j)); + ce(j,1:nx) = c(indj(end), 1:nx); + end; + +elseif ~evenx && eveny % X uneven and Y even + + nx = ceil(1 + (x(end) - x(1))/dx); % number of points for X + nx = min(nx, nmax); + xe = linspace(x(1), x(end), nx); + + ny = length(y); + ye = y; + + ce = zeros(ny,nx); + + for i=1:nx + indi = find(x<=xe(i)); + ce(1:ny,i) = c(1:ny, indi(end)); + end; + +elseif ~evenx && ~eveny % X and Y both uneven + + nx = ceil(1 + (x(end) - x(1))/dx); % number of points for X + nx = min(nx, nmax); + xe = linspace(x(1), x(end), nx); + + ny = ceil(1 + (y(end) - y(1))/dy); % number of points for Y + ny = min(ny, nmax); + ye = linspace(y(1), y(end), ny); + + ce = zeros(ny,nx); + + for i=1:nx + for j=1:ny + indi = find(x<=xe(i)); + indj = find(y<=ye(j)); + ce(j,i) = c(indi(end), indj(end)); + end; + end; + +end + +hh = image(xe, ye, ce, varargin{4:end}); + +if nargout>0 + h = hh; +end diff -r 000000000000 -r cc4b1211e677 core/tools/uimagesc.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/uimagesc.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,57 @@ +function h = uimagesc(varargin) +%UIMAGESC Display scaled image with uneven axis. +% UIMAGESC(...) is the same as UIMAGE(...) except the data is scaled +% to use the full colormap. See UIMAGE for details. +% +% Note: UIMAGESC is based on Matlab's original IMAGESC, Revision 5.11.4.5. +% UIMAGESC simply calls UIMAGE with a scaled colormap. +% +% F. Moisy - adapted from TMW +% Revision: 1.01, Date: 2006/06/13. +% +% See also IMAGE, IMAGESC, UIMAGE. + +% History: +% 2006/06/12: v1.00, first version. 
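For reference, a minimal sketch of the difference the routine above makes, assuming uimage.m and uimagesc.m are on the MATLAB path; the variable names f, t and S are illustrative only:

% random 100x40 matrix shown against a log-spaced frequency axis
f = logspace(1, 3, 40);      % 40 unevenly (log) spaced x-values, 10..1000
t = linspace(0, 10, 100);    % 100 evenly spaced y-values
S = randn(100, 40);          % size LENGTH(t) x LENGTH(f), as required

figure;
subplot(2,1,1);
imagesc(f, t, S);            % built-in IMAGESC treats f as linearly spaced
title('imagesc: log-spaced axis interpolated linearly');
subplot(2,1,2);
uimagesc(f, t, S);           % UIMAGESC locally duplicates columns of S instead
title('uimagesc: uneven axis respected');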
+ +clim = []; +switch (nargin), + case 0, + hh = uimage('CDataMapping','scaled'); + case 1, + hh = uimage(varargin{1},'CDataMapping','scaled'); + case 3, + hh = uimage(varargin{:},'CDataMapping','scaled'); + otherwise, + + % Determine if last input is clim + if isequal(size(varargin{end}),[1 2]) + str = false(length(varargin),1); + for n=1:length(varargin) + str(n) = ischar(varargin{n}); + end + str = find(str); + if isempty(str) || (rem(length(varargin)-min(str),2)==0), + clim = varargin{end}; + varargin(end) = []; % Remove last cell + else + clim = []; + end + else + clim = []; + end + hh = uimage(varargin{:},'CDataMapping','scaled'); +end + +% Get the parent Axes of the image +cax = ancestor(hh,'axes'); + +if ~isempty(clim), + set(cax,'CLim',clim) +elseif ~ishold(cax), + set(cax,'CLimMode','auto') +end + +if nargout > 0 + h = hh; +end diff -r 000000000000 -r cc4b1211e677 core/tools/uplot.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/uplot.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,195 @@ +function han = uplot(varargin) +% UPLOT Plot of double and FRD data. +% +% UPLOT([plot_type],SYS1,SYS2,SYS3, ...) +% UPLOT([plot_type],[1 10],[.1 5],SYS1, ...) +% UPLOT([plot_type],SYS1,'linetype1',SYS2,'linetype2',...) +% +% Plot double and FRD objects. The syntax is the same as the MATLAB +% plot command except that all data is contained in SYSi, and the +% axes are specified by PLOT_TYPE. +% +% The (optional) plot_type argument must be one of: +% +% 'iv,d' matin .vs. independent variable (default option) +% 'iv,m' magnitude .vs. independent variable +% 'iv,lm' log(magnitude) .vs. independent variable +% 'iv,p' phase .vs. independent variable +% 'liv,d' matin .vs. log(independent variable) +% 'liv,m' magnitude .vs. log(independent variable) +% 'liv,lm' log(magnitude) .vs. log(independent variable) +% 'liv,p' phase .vs. log(independent variable) +% 'nyq' real .vs. imaginary (parametrized by indep variable) +% 'nic' Nichols chart +% 'bode' Bode magnitude and phase plots +% +%See also: BODE, LOGLOG, PLOT, NICHOLS, NYQUIST, SEMILOGX, SEMILOGY, SIGMA. + +% Copyright 2004 The MathWorks, Inc. 
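A minimal usage sketch for the double-data branch of uplot described above (x and y data passed as plain double pairs, optionally followed by a line type); the frequency grid w and response H are illustrative, and the FRD branch would additionally require Control System Toolbox objects:

w = logspace(-1, 2, 200);          % frequency grid, rad/s
H = 1 ./ (1 + 1i*w);               % first-order low-pass frequency response
uplot('liv,lm', w, abs(H), 'b-');  % log magnitude vs. log frequency
% or magnitude and phase stacked in one figure:
uplot('bode', w, H);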
+ +nin = nargin; +if isa(varargin{1},'char') + plottype = varargin{1}; + sidx = 2; +else + plottype = 'iv,d'; + sidx = 1; +end + +argcell = cell(0,1); +cnt = 1; +cflag = 0; +dflag = 0; +ydataloc = []; +for i=sidx:nin + arg = varargin{i}; + switch class(arg) + case 'frd' + if dflag==1 + error('Double data must come in pairs'); + else + cflag = 0; + szm = size(arg); + if length(szm)==2 + npts = length(arg.Frequency); + ydata = reshape(arg.ResponseData,[szm(1)*szm(2) npts]).'; + xdata = arg.Frequency; + argcell = [argcell;{xdata};{ydata}]; + ydataloc = [ydataloc;cnt+1]; + cnt = cnt + 2; + else + nad = length(szm) - 2; + npts = length(arg.Frequency); + tmp = permute(arg.ResponseData,[1 2 4:4+nad-1 3]); + ydata = reshape(tmp,[prod(szm) npts]).'; + xdata = arg.Frequency; + argcell = [argcell;{xdata};{ydata}]; + ydataloc = [ydataloc;cnt+1]; + cnt = cnt + 2; + end + end + case 'char' + if dflag==1 + error('Double data must come in pairs'); + else + if cflag==0 + argcell = [argcell;{arg}]; + cnt = cnt + 1; + cflag = 1; + else + error('Never have 2 chars in a row'); + end + end + case 'double' + cflag = 0; + if dflag==0 % think xdata + argcell = [argcell;{arg}]; + cnt = cnt + 1; + dflag = 1; + elseif dflag==1 % think ydata + argcell = [argcell;{arg}]; + ydataloc = [ydataloc;cnt]; + cnt = cnt + 1; + dflag = 0; + end + otherwise + if isuncertain(arg) + error('Cannot plot uncertain matrices or systems'); + else + error('Cannot plot this type of data'); + end + end +end +xmin = inf; +xmax = -inf; +for i=1:length(ydataloc) + xmin = min([xmin min(argcell{ydataloc(i)-1})]); + xmax = max([xmax max(argcell{ydataloc(i)-1})]); +end +for i=1:length(ydataloc) + if length(argcell{ydataloc(i)})==1 + argcell{ydataloc(i)} = [argcell{ydataloc(i)} argcell{ydataloc(i)}]; + argcell{ydataloc(i)-1} = [xmin xmax]; + end +end + +switch plottype +case 'iv,d' + h = plot(argcell{:}); +case 'iv,m' + for i=1:length(ydataloc) + argcell{ydataloc(i)} = abs(argcell{ydataloc(i)}); + end + h = plot(argcell{:}); +case 'iv,lm' + for i=1:length(ydataloc) + argcell{ydataloc(i)} = abs(argcell{ydataloc(i)}); + end + h = semilogy(argcell{:}); +case 'iv,p' + for i=1:length(ydataloc) + argcell{ydataloc(i)} = (180/pi)*angle(argcell{ydataloc(i)}); + end + h = plot(argcell{:}); +case 'liv,d' + h = semilogx(argcell{:}); +case 'liv,ld' + h = loglog(argcell{:}); +case 'liv,m' + for i=1:length(ydataloc) + argcell{ydataloc(i)} = abs(argcell{ydataloc(i)}); + end + h = semilogx(argcell{:}); +case 'liv,lm' + for i=1:length(ydataloc) + argcell{ydataloc(i)} = abs(argcell{ydataloc(i)}); + end + h = loglog(argcell{:}); +case 'liv,p' + for i=1:length(ydataloc) + argcell{ydataloc(i)} = (180/pi)*angle(argcell{ydataloc(i)}); + end + h = semilogx(argcell{:}); +case {'nyq'} + for i=1:length(ydataloc) + %x-data, real part + argcell{ydataloc(i)-1} = real(argcell{ydataloc(i)}); + argcell{ydataloc(i)} = imag(argcell{ydataloc(i)}); + end + h = plot(argcell{:}); +case {'ri'} + for i=1:length(ydataloc) + %x-data, real part + argcell{ydataloc(i)-1} = real(argcell{ydataloc(i)}); + %y-data, imag part + argcell{ydataloc(i)} = imag(argcell{ydataloc(i)}); + end + h = plot(argcell{:}); +case {'nic'} + for i=1:length(ydataloc) + %x-data, imag part + argcell{ydataloc(i)-1} = 360/(2*pi)*negangle(argcell{ydataloc(i)}); + %y-data, real part + argcell{ydataloc(i)} = 20*log10(abs(argcell{ydataloc(i)})); + end + h = plot(argcell{:}); +case 'bode' + subplot(2,1,1) + magcell = argcell; + for i=1:length(ydataloc) + magcell{ydataloc(i)} = abs(magcell{ydataloc(i)}); + end + hm = 
loglog(magcell{:}); + subplot(2,1,2) + for i=1:length(ydataloc) + argcell{ydataloc(i)} = (180/pi)*angle(argcell{ydataloc(i)}); + end + hp = semilogx(argcell{:}); + h = [hm;hp]; +otherwise + error('invalid plot type'); +end + +if nargout==1 + han = h; +end \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 core/tools/write_mat_results_ismir12.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/core/tools/write_mat_results_ismir12.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,51 @@ +function write_mat_resutls_ismir12(filein, subrun, fileout) +% write_mat_resutls_ismir12(filein, subrun, fileout) +% +% write results from result file into ismir12 generic format. +% if subrun is a vector, the results are averaged over the +% runs specified + + +[out, stats, features, individual] = test_generic_display_results(filein); + +% --- +% we get the single individual values and +% --- +for i = 1:numel(subrun) + individual = individual(subrun); + + % number of inctrain cycles + n_inctrain = numel(individual.diag.inctrain); + + % --- + % ok_train_unused + % --- + values_ok_train_unused{i} = reshape([individual.diag.inctrain.ok_notin_train], [],n_inctrain); + values_ok_train_unused{i} = values_ok_train_unused(1:2:end,:).*100; + + mean_ok_train_unused{i} = mean(values_ok_train_unused, 1); + var_ok_train_unused{i} = var(values_ok_train_unused,[], 1); + + % --- + % ok_train + % --- + values_ok_train{i} = reshape([individual.diag.inctrain.ok_train], [],n_inctrain); + values_ok_train{i} = values_ok_train(1:2:end,:).*100; + + mean_ok_train{i} = mean(values_ok_train, 1); + var_ok_train{i} = var(values_ok_train,[], 1); + + % --- + % ok_test + % --- + values_ok_test{i} = reshape([individual.diag.inctrain.ok_test], [],n_inctrain); + values_ok_test{i} = values_ok_test(1:2:end,:).*100; + + mean_ok_test{i} = mean(values_ok_test, 1); + var_ok_test{i} = var(values_ok_test,[], 1); +end + + + +clear ('out', 'stats', 'features', 'individual'); +save(fileout) diff -r 000000000000 -r cc4b1211e677 editme_startup.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/editme_startup.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,35 @@ +% --- +% startup.m local +% this is the place to set system-specific parameters and paths +% --- + +global globalvars; + +% --- +% @TODO: PLEASE insert your absolute path to the folder containing this +% script +% --- + my_path = 'C:\_danielwolff\_documents\coding\matlab\music_research_branches\ISMIR2012'; + +% --- +% Add Toolboxes +% --- +addpath(my_path); + +addpath(genpath([my_path '/toolboxes/bioakustik_tools'])) +addpath(genpath([my_path '/toolboxes/graph_visualisation/graphViz4Matlab/util'])) + +addpath(pathsdiff(genpath([my_path]),... 
+ '.svn')); + +% smv-light +addtosystempath([my_path '/toolboxes/SVM-light']); + + +globalvars.tstaudiopath = [my_path '/features']; +globalvars.mfilepath = ''; +globalvars.tstoutputpath = [my_path '/features']; +globalvars.systemslash = filesep; + +cd ('core'); +startup_music_research; diff -r 000000000000 -r cc4b1211e677 features/runlog_bbdb1a9b81dce2e200b729b429223942_feat.mat Binary file features/runlog_bbdb1a9b81dce2e200b729b429223942_feat.mat has changed diff -r 000000000000 -r cc4b1211e677 features/runlog_bbdb1a9b81dce2e200b729b429223942_param.xml --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/features/runlog_bbdb1a9b81dce2e200b729b429223942_param.xml Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,10 @@ + + + + 1 + 1 + 1 + 1 + 1 + 0 + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/ChangeLog.Sourceforge.txt --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/ChangeLog.Sourceforge.txt Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,4436 @@ + + +2007-02-11 17:12 nsaunier + + * BNT/learning/learn_struct_pdag_pc.m: Bug submitted by Imme Ebert-Uphoff (ebert@tree.com) (see Thu Feb 8, 2007 email on the BNT mailing list). + +2005-11-26 12:12 yozhik + + * BNT/inference/dynamic/@hmm_inf_engine/fwdback_twoslice.m: merged + fwdback_twoslice.m to release branch + +2005-11-25 17:24 nsaunier + + * BNT/inference/dynamic/@hmm_inf_engine/fwdback_twoslice.m: adding + old missing fwdback_twoslice.m + +2005-11-25 17:24 yozhik + + * BNT/inference/dynamic/@hmm_inf_engine/fwdback_twoslice.m: file + fwdback_twoslice.m was added on branch release-1_0 on 2005-11-26 + 20:12:05 +0000 + +2005-09-25 15:54 yozhik + + * BNT/add_BNT_to_path.m: fix paths + +2005-09-25 15:30 yozhik + + * BNT/add_BNT_to_path.m: Restored directories to path. + +2005-09-25 15:29 yozhik + + * HMM/fwdback_twoslice.m: added missing fwdback_twoslice + +2005-09-17 11:14 yozhik + + * ChangeLog, BNT/add_BNT_to_path.m, BNT/test_BNT.m, + BNT/examples/static/cmp_inference_static.m, + BNT/inference/dynamic/@hmm_inf_engine/enter_evidence.m: Merged + bug fixes from HEAD. + +2005-09-17 11:11 yozhik + + * ChangeLog: added change log + +2005-09-17 11:11 yozhik + + * ChangeLog: file ChangeLog was added on branch release-1_0 on + 2005-09-17 18:14:47 +0000 + +2005-09-17 10:00 yozhik + + * BNT/inference/dynamic/@hmm_inf_engine/enter_evidence.m: Temporary + rollback to fix error, per Kevin. + +2005-09-17 09:59 yozhik + + * BNT/examples/static/cmp_inference_static.m: Commented out + erroneous line, per Kevin. + +2005-09-17 09:58 yozhik + + * BNT/add_BNT_to_path.m: Changed to require BNT_HOME to be + predefined. + +2005-09-17 09:56 yozhik + + * BNT/test_BNT.m: Commented out problematic tests. + +2005-09-17 09:38 yozhik + + * BNT/test_BNT.m: renable tests + +2005-09-12 22:18 yozhik + + * KPMtools/pca_kpm.m: Initial import of code base from Kevin + Murphy. 
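As a usage note for editme_startup.m above: the script is meant to be copied, its my_path variable pointed at the local checkout, and then run once per session so that globalvars and the toolbox paths are populated. A minimal sketch, assuming the rest of the repository (including core/startup_music_research.m) is present; the path shown is purely illustrative:

% inside your edited copy of editme_startup.m:
my_path = 'C:\work\camir\ISMIR2012';   % illustrative; replace with your checkout

% then, from the repository root:
editme_startup;

% afterwards the global configuration is available everywhere:
global globalvars;
disp(globalvars.tstaudiopath);          % resolves to <my_path>/features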
+ +2005-09-12 22:18 yozhik + + * KPMtools/pca_kpm.m: Initial revision + +2005-08-29 10:44 yozhik + + * graph/: README.txt, Old/best_first_elim_order.m, + Old/dag_to_jtree.m, Old/dfs.m, Old/dsep_test.m, + Old/mk_2D_lattice_slow.m, acyclic.m, assignEdgeNums.m, + best_first_elim_order.m, check_jtree_property.m, + check_triangulated.m, children.m, cliques_to_jtree.m, + cliques_to_strong_jtree.m, connected_graph.m, + dag_to_essential_graph.m, dfs.m, dfs_test.m, dijkstra.m, + family.m, graph_separated.m, graph_to_jtree.m, + min_subtree_con_nodes.m, minimum_spanning_tree.m, minspan.m, + mk_2D_lattice.m, mk_2D_lattice_slow.m, mk_adj_mat.m, + mk_adjmat_chain.m, mk_all_dags.m, mk_nbrs_of_dag.m, + mk_nbrs_of_digraph.m, mk_nbrs_of_digraph_broken.m, + mk_nbrs_of_digraph_not_vectorized.m, mk_rnd_dag.m, + mk_rnd_dag_given_edge_prob.m, mk_rooted_tree.m, mk_undirected.m, + moralize.m, neighbors.m, parents.m, pred2path.m, + reachability_graph.m, scc.m, strong_elim_order.m, test.m, + test_strong_root.m, topological_sort.m, trees.txt, triangulate.c, + triangulate.m, triangulate_2Dlattice_demo.m, triangulate_test.m: + Initial import of code base from Kevin Murphy. + +2005-08-29 10:44 yozhik + + * graph/: README.txt, Old/best_first_elim_order.m, + Old/dag_to_jtree.m, Old/dfs.m, Old/dsep_test.m, + Old/mk_2D_lattice_slow.m, acyclic.m, assignEdgeNums.m, + best_first_elim_order.m, check_jtree_property.m, + check_triangulated.m, children.m, cliques_to_jtree.m, + cliques_to_strong_jtree.m, connected_graph.m, + dag_to_essential_graph.m, dfs.m, dfs_test.m, dijkstra.m, + family.m, graph_separated.m, graph_to_jtree.m, + min_subtree_con_nodes.m, minimum_spanning_tree.m, minspan.m, + mk_2D_lattice.m, mk_2D_lattice_slow.m, mk_adj_mat.m, + mk_adjmat_chain.m, mk_all_dags.m, mk_nbrs_of_dag.m, + mk_nbrs_of_digraph.m, mk_nbrs_of_digraph_broken.m, + mk_nbrs_of_digraph_not_vectorized.m, mk_rnd_dag.m, + mk_rnd_dag_given_edge_prob.m, mk_rooted_tree.m, mk_undirected.m, + moralize.m, neighbors.m, parents.m, pred2path.m, + reachability_graph.m, scc.m, strong_elim_order.m, test.m, + test_strong_root.m, topological_sort.m, trees.txt, triangulate.c, + triangulate.m, triangulate_2Dlattice_demo.m, triangulate_test.m: + Initial revision + +2005-08-26 18:08 yozhik + + * KPMtools/fullfileKPM.m: Initial import of code base from Kevin + Murphy. + +2005-08-26 18:08 yozhik + + * KPMtools/fullfileKPM.m: Initial revision + +2005-08-21 13:00 yozhik + + * + BNT/inference/static/@pearl_inf_engine/private/parallel_protocol.m: + Initial import of code base from Kevin Murphy. + +2005-08-21 13:00 yozhik + + * + BNT/inference/static/@pearl_inf_engine/private/parallel_protocol.m: + Initial revision + +2005-07-11 12:07 yozhik + + * KPMtools/plotcov2New.m: Initial import of code base from Kevin + Murphy. + +2005-07-11 12:07 yozhik + + * KPMtools/plotcov2New.m: Initial revision + +2005-07-06 12:32 yozhik + + * KPMtools/montageKPM2.m: Initial import of code base from Kevin + Murphy. + +2005-07-06 12:32 yozhik + + * KPMtools/montageKPM2.m: Initial revision + +2005-06-27 18:35 yozhik + + * KPMtools/montageKPM3.m: Initial import of code base from Kevin + Murphy. + +2005-06-27 18:35 yozhik + + * KPMtools/montageKPM3.m: Initial revision + +2005-06-27 18:30 yozhik + + * KPMtools/cell2matPad.m: Initial import of code base from Kevin + Murphy. + +2005-06-27 18:30 yozhik + + * KPMtools/cell2matPad.m: Initial revision + +2005-06-15 14:13 yozhik + + * BNT/CPDs/@gaussian_CPD/gaussian_CPD.m: Initial import of code + base from Kevin Murphy. 
+ +2005-06-15 14:13 yozhik + + * BNT/CPDs/@gaussian_CPD/gaussian_CPD.m: Initial revision + +2005-06-08 18:56 yozhik + + * Kalman/testKalman.m: Initial import of code base from Kevin + Murphy. + +2005-06-08 18:56 yozhik + + * Kalman/testKalman.m: Initial revision + +2005-06-08 18:25 yozhik + + * HMM/: testHMM.m, fixed_lag_smoother_demo.m: Initial import of + code base from Kevin Murphy. + +2005-06-08 18:25 yozhik + + * HMM/: testHMM.m, fixed_lag_smoother_demo.m: Initial revision + +2005-06-08 18:22 yozhik + + * HMM/: README.txt, dhmm_em.m: Initial import of code base from + Kevin Murphy. + +2005-06-08 18:22 yozhik + + * HMM/: README.txt, dhmm_em.m: Initial revision + +2005-06-08 18:17 yozhik + + * HMM/fwdback.m: Initial import of code base from Kevin Murphy. + +2005-06-08 18:17 yozhik + + * HMM/fwdback.m: Initial revision + +2005-06-05 11:46 yozhik + + * KPMtools/: rectintLoopC.c, rectintLoopC.dll: Initial import of + code base from Kevin Murphy. + +2005-06-05 11:46 yozhik + + * KPMtools/: rectintLoopC.c, rectintLoopC.dll: Initial revision + +2005-06-01 12:39 yozhik + + * KPMtools/montageKPM.m: Initial import of code base from Kevin + Murphy. + +2005-06-01 12:39 yozhik + + * KPMtools/montageKPM.m: Initial revision + +2005-05-31 21:49 yozhik + + * KPMtools/initFigures.m: Initial import of code base from Kevin + Murphy. + +2005-05-31 21:49 yozhik + + * KPMtools/initFigures.m: Initial revision + +2005-05-31 11:19 yozhik + + * KPMstats/unidrndKPM.m: Initial import of code base from Kevin + Murphy. + +2005-05-31 11:19 yozhik + + * KPMstats/unidrndKPM.m: Initial revision + +2005-05-30 15:08 yozhik + + * KPMtools/filepartsLast.m: Initial import of code base from Kevin + Murphy. + +2005-05-30 15:08 yozhik + + * KPMtools/filepartsLast.m: Initial revision + +2005-05-29 23:01 yozhik + + * KPMtools/plotBox.m: Initial import of code base from Kevin + Murphy. + +2005-05-29 23:01 yozhik + + * KPMtools/plotBox.m: Initial revision + +2005-05-25 18:31 yozhik + + * KPMtools/plotColors.m: Initial import of code base from Kevin + Murphy. + +2005-05-25 18:31 yozhik + + * KPMtools/plotColors.m: Initial revision + +2005-05-25 12:11 yozhik + + * KPMtools/genpathKPM.m: Initial import of code base from Kevin + Murphy. + +2005-05-25 12:11 yozhik + + * KPMtools/genpathKPM.m: Initial revision + +2005-05-23 17:03 yozhik + + * netlab3.3/demhmc1.m: Initial import of code base from Kevin + Murphy. + +2005-05-23 17:03 yozhik + + * netlab3.3/demhmc1.m: Initial revision + +2005-05-23 16:44 yozhik + + * netlab3.3/gmminit.m: Initial import of code base from Kevin + Murphy. + +2005-05-23 16:44 yozhik + + * netlab3.3/gmminit.m: Initial revision + +2005-05-23 16:07 yozhik + + * netlab3.3/metrop.m: Initial import of code base from Kevin + Murphy. + +2005-05-23 16:07 yozhik + + * netlab3.3/metrop.m: Initial revision + +2005-05-22 23:23 yozhik + + * netlab3.3/demmet1.m: Initial import of code base from Kevin + Murphy. + +2005-05-22 23:23 yozhik + + * netlab3.3/demmet1.m: Initial revision + +2005-05-22 16:32 yozhik + + * KPMstats/: dirichletrnd.m, dirichletpdf.m, test_dir.m, + multirnd.m, multipdf.m: Initial import of code base from Kevin + Murphy. + +2005-05-22 16:32 yozhik + + * KPMstats/: dirichletrnd.m, dirichletpdf.m, test_dir.m, + multirnd.m, multipdf.m: Initial revision + +2005-05-13 13:52 yozhik + + * KPMtools/: asort.m, dirKPM.m: Initial import of code base from + Kevin Murphy. 
+ +2005-05-13 13:52 yozhik + + * KPMtools/: asort.m, dirKPM.m: Initial revision + +2005-05-09 18:32 yozhik + + * netlab3.3/dem2ddat.m: Initial import of code base from Kevin + Murphy. + +2005-05-09 18:32 yozhik + + * netlab3.3/dem2ddat.m: Initial revision + +2005-05-09 15:20 yozhik + + * KPMtools/: mkdirKPM.m, optimalMatching.m, optimalMatchingTest.m, + subsets1.m: Initial import of code base from Kevin Murphy. + +2005-05-09 15:20 yozhik + + * KPMtools/: mkdirKPM.m, optimalMatching.m, optimalMatchingTest.m, + subsets1.m: Initial revision + +2005-05-09 09:47 yozhik + + * KPMtools/bipartiteMatchingDemo.m: Initial import of code base + from Kevin Murphy. + +2005-05-09 09:47 yozhik + + * KPMtools/bipartiteMatchingDemo.m: Initial revision + +2005-05-08 22:25 yozhik + + * KPMtools/bipartiteMatchingIntProg.m: Initial import of code base + from Kevin Murphy. + +2005-05-08 22:25 yozhik + + * KPMtools/bipartiteMatchingIntProg.m: Initial revision + +2005-05-08 21:45 yozhik + + * KPMtools/bipartiteMatchingDemoPlot.m: Initial import of code base + from Kevin Murphy. + +2005-05-08 21:45 yozhik + + * KPMtools/bipartiteMatchingDemoPlot.m: Initial revision + +2005-05-08 19:55 yozhik + + * KPMtools/subsetsFixedSize.m: Initial import of code base from + Kevin Murphy. + +2005-05-08 19:55 yozhik + + * KPMtools/subsetsFixedSize.m: Initial revision + +2005-05-08 15:48 yozhik + + * KPMtools/centeringMatrix.m: Initial import of code base from + Kevin Murphy. + +2005-05-08 15:48 yozhik + + * KPMtools/centeringMatrix.m: Initial revision + +2005-05-08 10:51 yozhik + + * netlab3.3/demgmm1.m: Initial import of code base from Kevin + Murphy. + +2005-05-08 10:51 yozhik + + * netlab3.3/demgmm1.m: Initial revision + +2005-05-06 18:09 yozhik + + * BNT/add_BNT_to_path.m: Initial import of code base from Kevin + Murphy. + +2005-05-06 18:09 yozhik + + * BNT/add_BNT_to_path.m: Initial revision + +2005-05-03 21:35 yozhik + + * KPMstats/standardize.m: Initial import of code base from Kevin + Murphy. + +2005-05-03 21:35 yozhik + + * KPMstats/standardize.m: Initial revision + +2005-05-03 13:18 yozhik + + * KPMstats/histCmpChi2.m: Initial import of code base from Kevin + Murphy. + +2005-05-03 13:18 yozhik + + * KPMstats/histCmpChi2.m: Initial revision + +2005-05-03 12:01 yozhik + + * KPMtools/strsplit.m: Initial import of code base from Kevin + Murphy. + +2005-05-03 12:01 yozhik + + * KPMtools/strsplit.m: Initial revision + +2005-05-02 13:19 yozhik + + * KPMtools/hsvKPM.m: Initial import of code base from Kevin Murphy. + +2005-05-02 13:19 yozhik + + * KPMtools/hsvKPM.m: Initial revision + +2005-04-27 11:34 yozhik + + * BNT/potentials/@dpot/: subsasgn.m, subsref.m: Initial import of + code base from Kevin Murphy. 
+ +2005-04-27 11:34 yozhik + + * BNT/potentials/@dpot/: subsasgn.m, subsref.m: Initial revision + +2005-04-27 10:58 yozhik + + * KPMtools/mahal2conf.m, nethelp3.3/conffig.htm, + nethelp3.3/confmat.htm, nethelp3.3/conjgrad.htm, + nethelp3.3/consist.htm, nethelp3.3/convertoldnet.htm, + nethelp3.3/datread.htm, nethelp3.3/datwrite.htm, + nethelp3.3/dem2ddat.htm, nethelp3.3/demard.htm, + nethelp3.3/demev1.htm, nethelp3.3/demev2.htm, + nethelp3.3/demev3.htm, nethelp3.3/demgauss.htm, + nethelp3.3/demglm1.htm, nethelp3.3/demglm2.htm, + nethelp3.3/demgmm1.htm, nethelp3.3/demgmm2.htm, + nethelp3.3/demgmm3.htm, nethelp3.3/demgmm4.htm, + nethelp3.3/demgmm5.htm, nethelp3.3/demgp.htm, + nethelp3.3/demgpard.htm, nethelp3.3/demgpot.htm, + nethelp3.3/demgtm1.htm, nethelp3.3/demgtm2.htm, + nethelp3.3/demhint.htm, nethelp3.3/demhmc1.htm, + nethelp3.3/demhmc2.htm, nethelp3.3/demhmc3.htm, + nethelp3.3/demkmn1.htm, nethelp3.3/demknn1.htm, + nethelp3.3/demmdn1.htm, nethelp3.3/demmet1.htm, + nethelp3.3/demmlp1.htm, nethelp3.3/demmlp2.htm, + nethelp3.3/demnlab.htm, nethelp3.3/demns1.htm, + nethelp3.3/demolgd1.htm, nethelp3.3/demopt1.htm, + nethelp3.3/dempot.htm, nethelp3.3/demprgp.htm, + nethelp3.3/demprior.htm, nethelp3.3/demrbf1.htm, + nethelp3.3/demsom1.htm, nethelp3.3/demtrain.htm, + nethelp3.3/dist2.htm, nethelp3.3/eigdec.htm, + nethelp3.3/errbayes.htm, nethelp3.3/evidence.htm, + nethelp3.3/fevbayes.htm, nethelp3.3/gauss.htm, + nethelp3.3/gbayes.htm, nethelp3.3/glm.htm, + nethelp3.3/glmderiv.htm, nethelp3.3/glmerr.htm, + nethelp3.3/glmevfwd.htm, nethelp3.3/glmfwd.htm, + nethelp3.3/glmgrad.htm, nethelp3.3/glmhess.htm, + nethelp3.3/glminit.htm, nethelp3.3/glmpak.htm, + nethelp3.3/glmtrain.htm, nethelp3.3/glmunpak.htm, + nethelp3.3/gmm.htm, nethelp3.3/gmmactiv.htm, + nethelp3.3/gmmem.htm, nethelp3.3/gmminit.htm, + nethelp3.3/gmmpak.htm, nethelp3.3/gmmpost.htm, + nethelp3.3/gmmprob.htm, nethelp3.3/gmmsamp.htm, + nethelp3.3/gmmunpak.htm, nethelp3.3/gp.htm, + nethelp3.3/gpcovar.htm, nethelp3.3/gpcovarf.htm, + nethelp3.3/gpcovarp.htm, nethelp3.3/gperr.htm, + nethelp3.3/gpfwd.htm, nethelp3.3/gpgrad.htm, + nethelp3.3/gpinit.htm, nethelp3.3/gppak.htm, + nethelp3.3/gpunpak.htm, nethelp3.3/gradchek.htm, + nethelp3.3/graddesc.htm, nethelp3.3/gsamp.htm, + nethelp3.3/gtm.htm, nethelp3.3/gtmem.htm, nethelp3.3/gtmfwd.htm, + nethelp3.3/gtminit.htm, nethelp3.3/gtmlmean.htm, + nethelp3.3/gtmlmode.htm, nethelp3.3/gtmmag.htm, + nethelp3.3/gtmpost.htm, nethelp3.3/gtmprob.htm, + nethelp3.3/hbayes.htm, nethelp3.3/hesschek.htm, + nethelp3.3/hintmat.htm, nethelp3.3/hinton.htm, + nethelp3.3/histp.htm, nethelp3.3/hmc.htm, nethelp3.3/index.htm, + nethelp3.3/kmeans.htm, nethelp3.3/knn.htm, nethelp3.3/knnfwd.htm, + nethelp3.3/linef.htm, nethelp3.3/linemin.htm, + nethelp3.3/maxitmess.htm, nethelp3.3/mdn.htm, + nethelp3.3/mdn2gmm.htm, nethelp3.3/mdndist2.htm, + nethelp3.3/mdnerr.htm, nethelp3.3/mdnfwd.htm, + nethelp3.3/mdngrad.htm, nethelp3.3/mdninit.htm, + nethelp3.3/mdnpak.htm, nethelp3.3/mdnpost.htm, + nethelp3.3/mdnprob.htm, nethelp3.3/mdnunpak.htm, + nethelp3.3/metrop.htm, nethelp3.3/minbrack.htm, + nethelp3.3/mlp.htm, nethelp3.3/mlpbkp.htm, + nethelp3.3/mlpderiv.htm, nethelp3.3/mlperr.htm, + nethelp3.3/mlpevfwd.htm, nethelp3.3/mlpfwd.htm, + nethelp3.3/mlpgrad.htm, nethelp3.3/mlphdotv.htm, + nethelp3.3/mlphess.htm, nethelp3.3/mlphint.htm, + nethelp3.3/mlpinit.htm, nethelp3.3/mlppak.htm, + nethelp3.3/mlpprior.htm, nethelp3.3/mlptrain.htm, + nethelp3.3/mlpunpak.htm, nethelp3.3/netderiv.htm, + nethelp3.3/neterr.htm, nethelp3.3/netevfwd.htm, 
+ nethelp3.3/netgrad.htm, nethelp3.3/nethelp3.3.zip, + nethelp3.3/nethess.htm, nethelp3.3/netinit.htm, + nethelp3.3/netopt.htm, nethelp3.3/netpak.htm, + nethelp3.3/netunpak.htm, nethelp3.3/olgd.htm, nethelp3.3/pca.htm, + nethelp3.3/plotmat.htm, nethelp3.3/ppca.htm, + nethelp3.3/quasinew.htm, nethelp3.3/rbf.htm, + nethelp3.3/rbfbkp.htm, nethelp3.3/rbfderiv.htm, + nethelp3.3/rbferr.htm, nethelp3.3/rbfevfwd.htm, + nethelp3.3/rbffwd.htm, nethelp3.3/rbfgrad.htm, + nethelp3.3/rbfhess.htm, nethelp3.3/rbfjacob.htm, + nethelp3.3/rbfpak.htm, nethelp3.3/rbfprior.htm, + nethelp3.3/rbfsetbf.htm, nethelp3.3/rbfsetfw.htm, + nethelp3.3/rbftrain.htm, nethelp3.3/rbfunpak.htm, + nethelp3.3/rosegrad.htm, nethelp3.3/rosen.htm, + nethelp3.3/scg.htm, nethelp3.3/som.htm, nethelp3.3/somfwd.htm, + nethelp3.3/sompak.htm, nethelp3.3/somtrain.htm, + nethelp3.3/somunpak.htm, netlab3.3/Contents.m, netlab3.3/LICENSE, + netlab3.3/conffig.m, netlab3.3/confmat.m, netlab3.3/conjgrad.m, + netlab3.3/consist.m, netlab3.3/convertoldnet.m, + netlab3.3/datread.m, netlab3.3/datwrite.m, netlab3.3/demard.m, + netlab3.3/demev1.m, netlab3.3/demev2.m, netlab3.3/demev3.m, + netlab3.3/demgauss.m, netlab3.3/demglm1.m, netlab3.3/demglm2.m, + netlab3.3/demgmm2.m, netlab3.3/demgmm3.m, netlab3.3/demgmm4.m, + netlab3.3/demgmm5.m, netlab3.3/demgp.m, netlab3.3/demgpard.m, + netlab3.3/demgpot.m, netlab3.3/demgtm1.m, netlab3.3/demgtm2.m, + netlab3.3/demhint.m, netlab3.3/demhmc2.m, netlab3.3/demhmc3.m, + netlab3.3/demkmn1.m, netlab3.3/demknn1.m, netlab3.3/demmdn1.m, + netlab3.3/demmlp1.m, netlab3.3/demmlp2.m, netlab3.3/demnlab.m, + netlab3.3/demns1.m, netlab3.3/demolgd1.m, netlab3.3/demopt1.m, + netlab3.3/dempot.m, netlab3.3/demprgp.m, netlab3.3/demprior.m, + netlab3.3/demrbf1.m, netlab3.3/demsom1.m, netlab3.3/demtrain.m, + netlab3.3/dist2.m, netlab3.3/eigdec.m, netlab3.3/errbayes.m, + netlab3.3/evidence.m, netlab3.3/fevbayes.m, netlab3.3/gauss.m, + netlab3.3/gbayes.m, netlab3.3/glm.m, netlab3.3/glmderiv.m, + netlab3.3/glmerr.m, netlab3.3/glmevfwd.m, netlab3.3/glmfwd.m, + netlab3.3/glmgrad.m, netlab3.3/glmhess.m, netlab3.3/glminit.m, + netlab3.3/glmpak.m, netlab3.3/glmtrain.m, netlab3.3/glmunpak.m, + netlab3.3/gmm.m, netlab3.3/gmmactiv.m, netlab3.3/gmmem.m, + netlab3.3/gmmpak.m, netlab3.3/gmmpost.m, netlab3.3/gmmprob.m, + netlab3.3/gmmsamp.m, netlab3.3/gmmunpak.m, netlab3.3/gp.m, + netlab3.3/gpcovar.m, netlab3.3/gpcovarf.m, netlab3.3/gpcovarp.m, + netlab3.3/gperr.m, netlab3.3/gpfwd.m, netlab3.3/gpgrad.m, + netlab3.3/gpinit.m, netlab3.3/gppak.m, netlab3.3/gpunpak.m, + netlab3.3/gradchek.m, netlab3.3/graddesc.m, netlab3.3/gsamp.m, + netlab3.3/gtm.m, netlab3.3/gtmem.m, netlab3.3/gtmfwd.m, + netlab3.3/gtminit.m, netlab3.3/gtmlmean.m, netlab3.3/gtmlmode.m, + netlab3.3/gtmmag.m, netlab3.3/gtmpost.m, netlab3.3/gtmprob.m, + netlab3.3/hbayes.m, netlab3.3/hesschek.m, netlab3.3/hintmat.m, + netlab3.3/hinton.m, netlab3.3/histp.m, netlab3.3/hmc.m, + netlab3.3/kmeansNetlab.m, netlab3.3/knn.m, netlab3.3/knnfwd.m, + netlab3.3/linef.m, netlab3.3/linemin.m, netlab3.3/maxitmess.m, + netlab3.3/mdn.m, netlab3.3/mdn2gmm.m, netlab3.3/mdndist2.m, + netlab3.3/mdnerr.m, netlab3.3/mdnfwd.m, netlab3.3/mdngrad.m, + netlab3.3/mdninit.m, netlab3.3/mdnnet.mat, netlab3.3/mdnpak.m, + netlab3.3/mdnpost.m, netlab3.3/mdnprob.m, netlab3.3/mdnunpak.m, + netlab3.3/minbrack.m, netlab3.3/mlp.m, netlab3.3/mlpbkp.m, + netlab3.3/mlpderiv.m, netlab3.3/mlperr.m, netlab3.3/mlpevfwd.m, + netlab3.3/mlpfwd.m, netlab3.3/mlpgrad.m, netlab3.3/mlphdotv.m, + netlab3.3/mlphess.m, netlab3.3/mlphint.m, 
netlab3.3/mlpinit.m, + netlab3.3/mlppak.m, netlab3.3/mlpprior.m, netlab3.3/mlptrain.m, + netlab3.3/mlpunpak.m, netlab3.3/netderiv.m, netlab3.3/neterr.m, + netlab3.3/netevfwd.m, netlab3.3/netgrad.m, netlab3.3/nethess.m, + netlab3.3/netinit.m, netlab3.3/netlab3.3.zip, + netlab3.3/netlogo.mat, netlab3.3/netopt.m, netlab3.3/netpak.m, + netlab3.3/netunpak.m, netlab3.3/oilTrn.dat, netlab3.3/oilTst.dat, + netlab3.3/olgd.m, netlab3.3/pca.m, netlab3.3/plotmat.m, + netlab3.3/ppca.m, netlab3.3/quasinew.m, netlab3.3/rbf.m, + netlab3.3/rbfbkp.m, netlab3.3/rbfderiv.m, netlab3.3/rbferr.m, + netlab3.3/rbfevfwd.m, netlab3.3/rbffwd.m, netlab3.3/rbfgrad.m, + netlab3.3/rbfhess.m, netlab3.3/rbfjacob.m, netlab3.3/rbfpak.m, + netlab3.3/rbfprior.m, netlab3.3/rbfsetbf.m, netlab3.3/rbfsetfw.m, + netlab3.3/rbftrain.m, netlab3.3/rbfunpak.m, netlab3.3/rosegrad.m, + netlab3.3/rosen.m, netlab3.3/scg.m, netlab3.3/som.m, + netlab3.3/somfwd.m, netlab3.3/sompak.m, netlab3.3/somtrain.m, + netlab3.3/somunpak.m, netlab3.3/xor.dat, netlabKPM/README.txt, + netlabKPM/demgmm1_movie.m, netlabKPM/evidence_weighted.m, + netlabKPM/glmerr_weighted.m, netlabKPM/glmgrad_weighted.m, + netlabKPM/glmhess_weighted.m, netlabKPM/glmtrain_weighted.m, + netlabKPM/gmm1.avi, netlabKPM/gmmem2.m, + netlabKPM/gmmem_multi_restart.m, netlabKPM/kmeans_demo.m, + netlabKPM/mlperr_weighted.m, netlabKPM/mlpgrad_weighted.m, + netlabKPM/mlphdotv_weighted.m, netlabKPM/mlphess_weighted.m, + netlabKPM/neterr_weighted.m, netlabKPM/netgrad_weighted.m, + netlabKPM/nethess_weighted.m, netlabKPM/netopt_weighted.m, + netlabKPM/process_options.m: Initial import of code base from + Kevin Murphy. + +2005-04-27 10:58 yozhik + + * KPMtools/mahal2conf.m, nethelp3.3/conffig.htm, + nethelp3.3/confmat.htm, nethelp3.3/conjgrad.htm, + nethelp3.3/consist.htm, nethelp3.3/convertoldnet.htm, + nethelp3.3/datread.htm, nethelp3.3/datwrite.htm, + nethelp3.3/dem2ddat.htm, nethelp3.3/demard.htm, + nethelp3.3/demev1.htm, nethelp3.3/demev2.htm, + nethelp3.3/demev3.htm, nethelp3.3/demgauss.htm, + nethelp3.3/demglm1.htm, nethelp3.3/demglm2.htm, + nethelp3.3/demgmm1.htm, nethelp3.3/demgmm2.htm, + nethelp3.3/demgmm3.htm, nethelp3.3/demgmm4.htm, + nethelp3.3/demgmm5.htm, nethelp3.3/demgp.htm, + nethelp3.3/demgpard.htm, nethelp3.3/demgpot.htm, + nethelp3.3/demgtm1.htm, nethelp3.3/demgtm2.htm, + nethelp3.3/demhint.htm, nethelp3.3/demhmc1.htm, + nethelp3.3/demhmc2.htm, nethelp3.3/demhmc3.htm, + nethelp3.3/demkmn1.htm, nethelp3.3/demknn1.htm, + nethelp3.3/demmdn1.htm, nethelp3.3/demmet1.htm, + nethelp3.3/demmlp1.htm, nethelp3.3/demmlp2.htm, + nethelp3.3/demnlab.htm, nethelp3.3/demns1.htm, + nethelp3.3/demolgd1.htm, nethelp3.3/demopt1.htm, + nethelp3.3/dempot.htm, nethelp3.3/demprgp.htm, + nethelp3.3/demprior.htm, nethelp3.3/demrbf1.htm, + nethelp3.3/demsom1.htm, nethelp3.3/demtrain.htm, + nethelp3.3/dist2.htm, nethelp3.3/eigdec.htm, + nethelp3.3/errbayes.htm, nethelp3.3/evidence.htm, + nethelp3.3/fevbayes.htm, nethelp3.3/gauss.htm, + nethelp3.3/gbayes.htm, nethelp3.3/glm.htm, + nethelp3.3/glmderiv.htm, nethelp3.3/glmerr.htm, + nethelp3.3/glmevfwd.htm, nethelp3.3/glmfwd.htm, + nethelp3.3/glmgrad.htm, nethelp3.3/glmhess.htm, + nethelp3.3/glminit.htm, nethelp3.3/glmpak.htm, + nethelp3.3/glmtrain.htm, nethelp3.3/glmunpak.htm, + nethelp3.3/gmm.htm, nethelp3.3/gmmactiv.htm, + nethelp3.3/gmmem.htm, nethelp3.3/gmminit.htm, + nethelp3.3/gmmpak.htm, nethelp3.3/gmmpost.htm, + nethelp3.3/gmmprob.htm, nethelp3.3/gmmsamp.htm, + nethelp3.3/gmmunpak.htm, nethelp3.3/gp.htm, + nethelp3.3/gpcovar.htm, 
nethelp3.3/gpcovarf.htm, + nethelp3.3/gpcovarp.htm, nethelp3.3/gperr.htm, + nethelp3.3/gpfwd.htm, nethelp3.3/gpgrad.htm, + nethelp3.3/gpinit.htm, nethelp3.3/gppak.htm, + nethelp3.3/gpunpak.htm, nethelp3.3/gradchek.htm, + nethelp3.3/graddesc.htm, nethelp3.3/gsamp.htm, + nethelp3.3/gtm.htm, nethelp3.3/gtmem.htm, nethelp3.3/gtmfwd.htm, + nethelp3.3/gtminit.htm, nethelp3.3/gtmlmean.htm, + nethelp3.3/gtmlmode.htm, nethelp3.3/gtmmag.htm, + nethelp3.3/gtmpost.htm, nethelp3.3/gtmprob.htm, + nethelp3.3/hbayes.htm, nethelp3.3/hesschek.htm, + nethelp3.3/hintmat.htm, nethelp3.3/hinton.htm, + nethelp3.3/histp.htm, nethelp3.3/hmc.htm, nethelp3.3/index.htm, + nethelp3.3/kmeans.htm, nethelp3.3/knn.htm, nethelp3.3/knnfwd.htm, + nethelp3.3/linef.htm, nethelp3.3/linemin.htm, + nethelp3.3/maxitmess.htm, nethelp3.3/mdn.htm, + nethelp3.3/mdn2gmm.htm, nethelp3.3/mdndist2.htm, + nethelp3.3/mdnerr.htm, nethelp3.3/mdnfwd.htm, + nethelp3.3/mdngrad.htm, nethelp3.3/mdninit.htm, + nethelp3.3/mdnpak.htm, nethelp3.3/mdnpost.htm, + nethelp3.3/mdnprob.htm, nethelp3.3/mdnunpak.htm, + nethelp3.3/metrop.htm, nethelp3.3/minbrack.htm, + nethelp3.3/mlp.htm, nethelp3.3/mlpbkp.htm, + nethelp3.3/mlpderiv.htm, nethelp3.3/mlperr.htm, + nethelp3.3/mlpevfwd.htm, nethelp3.3/mlpfwd.htm, + nethelp3.3/mlpgrad.htm, nethelp3.3/mlphdotv.htm, + nethelp3.3/mlphess.htm, nethelp3.3/mlphint.htm, + nethelp3.3/mlpinit.htm, nethelp3.3/mlppak.htm, + nethelp3.3/mlpprior.htm, nethelp3.3/mlptrain.htm, + nethelp3.3/mlpunpak.htm, nethelp3.3/netderiv.htm, + nethelp3.3/neterr.htm, nethelp3.3/netevfwd.htm, + nethelp3.3/netgrad.htm, nethelp3.3/nethelp3.3.zip, + nethelp3.3/nethess.htm, nethelp3.3/netinit.htm, + nethelp3.3/netopt.htm, nethelp3.3/netpak.htm, + nethelp3.3/netunpak.htm, nethelp3.3/olgd.htm, nethelp3.3/pca.htm, + nethelp3.3/plotmat.htm, nethelp3.3/ppca.htm, + nethelp3.3/quasinew.htm, nethelp3.3/rbf.htm, + nethelp3.3/rbfbkp.htm, nethelp3.3/rbfderiv.htm, + nethelp3.3/rbferr.htm, nethelp3.3/rbfevfwd.htm, + nethelp3.3/rbffwd.htm, nethelp3.3/rbfgrad.htm, + nethelp3.3/rbfhess.htm, nethelp3.3/rbfjacob.htm, + nethelp3.3/rbfpak.htm, nethelp3.3/rbfprior.htm, + nethelp3.3/rbfsetbf.htm, nethelp3.3/rbfsetfw.htm, + nethelp3.3/rbftrain.htm, nethelp3.3/rbfunpak.htm, + nethelp3.3/rosegrad.htm, nethelp3.3/rosen.htm, + nethelp3.3/scg.htm, nethelp3.3/som.htm, nethelp3.3/somfwd.htm, + nethelp3.3/sompak.htm, nethelp3.3/somtrain.htm, + nethelp3.3/somunpak.htm, netlab3.3/Contents.m, netlab3.3/LICENSE, + netlab3.3/conffig.m, netlab3.3/confmat.m, netlab3.3/conjgrad.m, + netlab3.3/consist.m, netlab3.3/convertoldnet.m, + netlab3.3/datread.m, netlab3.3/datwrite.m, netlab3.3/demard.m, + netlab3.3/demev1.m, netlab3.3/demev2.m, netlab3.3/demev3.m, + netlab3.3/demgauss.m, netlab3.3/demglm1.m, netlab3.3/demglm2.m, + netlab3.3/demgmm2.m, netlab3.3/demgmm3.m, netlab3.3/demgmm4.m, + netlab3.3/demgmm5.m, netlab3.3/demgp.m, netlab3.3/demgpard.m, + netlab3.3/demgpot.m, netlab3.3/demgtm1.m, netlab3.3/demgtm2.m, + netlab3.3/demhint.m, netlab3.3/demhmc2.m, netlab3.3/demhmc3.m, + netlab3.3/demkmn1.m, netlab3.3/demknn1.m, netlab3.3/demmdn1.m, + netlab3.3/demmlp1.m, netlab3.3/demmlp2.m, netlab3.3/demnlab.m, + netlab3.3/demns1.m, netlab3.3/demolgd1.m, netlab3.3/demopt1.m, + netlab3.3/dempot.m, netlab3.3/demprgp.m, netlab3.3/demprior.m, + netlab3.3/demrbf1.m, netlab3.3/demsom1.m, netlab3.3/demtrain.m, + netlab3.3/dist2.m, netlab3.3/eigdec.m, netlab3.3/errbayes.m, + netlab3.3/evidence.m, netlab3.3/fevbayes.m, netlab3.3/gauss.m, + netlab3.3/gbayes.m, netlab3.3/glm.m, netlab3.3/glmderiv.m, + 
netlab3.3/glmerr.m, netlab3.3/glmevfwd.m, netlab3.3/glmfwd.m, + netlab3.3/glmgrad.m, netlab3.3/glmhess.m, netlab3.3/glminit.m, + netlab3.3/glmpak.m, netlab3.3/glmtrain.m, netlab3.3/glmunpak.m, + netlab3.3/gmm.m, netlab3.3/gmmactiv.m, netlab3.3/gmmem.m, + netlab3.3/gmmpak.m, netlab3.3/gmmpost.m, netlab3.3/gmmprob.m, + netlab3.3/gmmsamp.m, netlab3.3/gmmunpak.m, netlab3.3/gp.m, + netlab3.3/gpcovar.m, netlab3.3/gpcovarf.m, netlab3.3/gpcovarp.m, + netlab3.3/gperr.m, netlab3.3/gpfwd.m, netlab3.3/gpgrad.m, + netlab3.3/gpinit.m, netlab3.3/gppak.m, netlab3.3/gpunpak.m, + netlab3.3/gradchek.m, netlab3.3/graddesc.m, netlab3.3/gsamp.m, + netlab3.3/gtm.m, netlab3.3/gtmem.m, netlab3.3/gtmfwd.m, + netlab3.3/gtminit.m, netlab3.3/gtmlmean.m, netlab3.3/gtmlmode.m, + netlab3.3/gtmmag.m, netlab3.3/gtmpost.m, netlab3.3/gtmprob.m, + netlab3.3/hbayes.m, netlab3.3/hesschek.m, netlab3.3/hintmat.m, + netlab3.3/hinton.m, netlab3.3/histp.m, netlab3.3/hmc.m, + netlab3.3/kmeansNetlab.m, netlab3.3/knn.m, netlab3.3/knnfwd.m, + netlab3.3/linef.m, netlab3.3/linemin.m, netlab3.3/maxitmess.m, + netlab3.3/mdn.m, netlab3.3/mdn2gmm.m, netlab3.3/mdndist2.m, + netlab3.3/mdnerr.m, netlab3.3/mdnfwd.m, netlab3.3/mdngrad.m, + netlab3.3/mdninit.m, netlab3.3/mdnnet.mat, netlab3.3/mdnpak.m, + netlab3.3/mdnpost.m, netlab3.3/mdnprob.m, netlab3.3/mdnunpak.m, + netlab3.3/minbrack.m, netlab3.3/mlp.m, netlab3.3/mlpbkp.m, + netlab3.3/mlpderiv.m, netlab3.3/mlperr.m, netlab3.3/mlpevfwd.m, + netlab3.3/mlpfwd.m, netlab3.3/mlpgrad.m, netlab3.3/mlphdotv.m, + netlab3.3/mlphess.m, netlab3.3/mlphint.m, netlab3.3/mlpinit.m, + netlab3.3/mlppak.m, netlab3.3/mlpprior.m, netlab3.3/mlptrain.m, + netlab3.3/mlpunpak.m, netlab3.3/netderiv.m, netlab3.3/neterr.m, + netlab3.3/netevfwd.m, netlab3.3/netgrad.m, netlab3.3/nethess.m, + netlab3.3/netinit.m, netlab3.3/netlab3.3.zip, + netlab3.3/netlogo.mat, netlab3.3/netopt.m, netlab3.3/netpak.m, + netlab3.3/netunpak.m, netlab3.3/oilTrn.dat, netlab3.3/oilTst.dat, + netlab3.3/olgd.m, netlab3.3/pca.m, netlab3.3/plotmat.m, + netlab3.3/ppca.m, netlab3.3/quasinew.m, netlab3.3/rbf.m, + netlab3.3/rbfbkp.m, netlab3.3/rbfderiv.m, netlab3.3/rbferr.m, + netlab3.3/rbfevfwd.m, netlab3.3/rbffwd.m, netlab3.3/rbfgrad.m, + netlab3.3/rbfhess.m, netlab3.3/rbfjacob.m, netlab3.3/rbfpak.m, + netlab3.3/rbfprior.m, netlab3.3/rbfsetbf.m, netlab3.3/rbfsetfw.m, + netlab3.3/rbftrain.m, netlab3.3/rbfunpak.m, netlab3.3/rosegrad.m, + netlab3.3/rosen.m, netlab3.3/scg.m, netlab3.3/som.m, + netlab3.3/somfwd.m, netlab3.3/sompak.m, netlab3.3/somtrain.m, + netlab3.3/somunpak.m, netlab3.3/xor.dat, netlabKPM/README.txt, + netlabKPM/demgmm1_movie.m, netlabKPM/evidence_weighted.m, + netlabKPM/glmerr_weighted.m, netlabKPM/glmgrad_weighted.m, + netlabKPM/glmhess_weighted.m, netlabKPM/glmtrain_weighted.m, + netlabKPM/gmm1.avi, netlabKPM/gmmem2.m, + netlabKPM/gmmem_multi_restart.m, netlabKPM/kmeans_demo.m, + netlabKPM/mlperr_weighted.m, netlabKPM/mlpgrad_weighted.m, + netlabKPM/mlphdotv_weighted.m, netlabKPM/mlphess_weighted.m, + netlabKPM/neterr_weighted.m, netlabKPM/netgrad_weighted.m, + netlabKPM/nethess_weighted.m, netlabKPM/netopt_weighted.m, + netlabKPM/process_options.m: Initial revision + +2005-04-25 19:29 yozhik + + * KPMstats/KLgauss.m, KPMstats/README.txt, KPMstats/beta_sample.m, + KPMstats/chisquared_histo.m, KPMstats/chisquared_prob.m, + KPMstats/chisquared_readme.txt, KPMstats/chisquared_table.m, + KPMstats/clg_Mstep.m, KPMstats/clg_Mstep_simple.m, + KPMstats/clg_prob.m, KPMstats/condGaussToJoint.m, + KPMstats/cond_indep_fisher_z.m, + 
KPMstats/condgaussTrainObserved.m, KPMstats/condgauss_sample.m, + KPMstats/convertBinaryLabels.m, KPMstats/cwr_demo.m, + KPMstats/cwr_em.m, KPMstats/cwr_predict.m, KPMstats/cwr_prob.m, + KPMstats/cwr_readme.txt, KPMstats/cwr_test.m, + KPMstats/dirichlet_sample.m, KPMstats/distchck.m, + KPMstats/eigdec.m, KPMstats/est_transmat.m, + KPMstats/fit_paritioned_model_testfn.m, + KPMstats/fit_partitioned_model.m, KPMstats/gamma_sample.m, + KPMstats/gaussian_prob.m, KPMstats/gaussian_sample.m, + KPMstats/linear_regression.m, KPMstats/logist2.m, + KPMstats/logist2Apply.m, KPMstats/logist2ApplyRegularized.m, + KPMstats/logist2Fit.m, KPMstats/logist2FitRegularized.m, + KPMstats/logistK.m, KPMstats/logistK_eval.m, + KPMstats/marginalize_gaussian.m, KPMstats/matrix_T_pdf.m, + KPMstats/matrix_normal_pdf.m, KPMstats/mc_stat_distrib.m, + KPMstats/mixgauss_Mstep.m, KPMstats/mixgauss_classifier_apply.m, + KPMstats/mixgauss_classifier_train.m, KPMstats/mixgauss_em.m, + KPMstats/mixgauss_init.m, KPMstats/mixgauss_prob.m, + KPMstats/mixgauss_prob_test.m, KPMstats/mixgauss_sample.m, + KPMstats/mkPolyFvec.m, KPMstats/mk_unit_norm.m, + KPMstats/multinomial_prob.m, KPMstats/multinomial_sample.m, + KPMstats/normal_coef.m, KPMstats/partial_corr_coef.m, + KPMstats/parzen.m, KPMstats/parzenC.c, KPMstats/parzenC.dll, + KPMstats/parzenC.mexglx, KPMstats/parzenC_test.m, + KPMstats/parzen_fit_select_unif.m, KPMstats/pca.m, + KPMstats/rndcheck.m, KPMstats/sample.m, + KPMstats/sample_discrete.m, KPMstats/sample_gaussian.m, + KPMstats/student_t_logprob.m, KPMstats/student_t_prob.m, + KPMstats/unif_discrete_sample.m, KPMstats/weightedRegression.m, + KPMtools/README.txt, KPMtools/approx_unique.m, + KPMtools/approxeq.m, KPMtools/argmax.m, KPMtools/argmin.m, + KPMtools/assert.m, KPMtools/assignEdgeNums.m, + KPMtools/assign_cols.m, KPMtools/axis_pct.m, KPMtools/block.m, + KPMtools/cell2num.m, KPMtools/chi2inv.m, KPMtools/choose.m, + KPMtools/collapse_mog.m, KPMtools/colmult.c, + KPMtools/colmult.mexglx, KPMtools/computeROC.m, + KPMtools/compute_counts.m, KPMtools/conf2mahal.m, + KPMtools/cross_entropy.m, KPMtools/div.m, KPMtools/draw_circle.m, + KPMtools/draw_ellipse.m, KPMtools/draw_ellipse_axes.m, + KPMtools/em_converged.m, KPMtools/entropy.m, + KPMtools/exportfig.m, KPMtools/extend_domain_table.m, + KPMtools/factorial.m, KPMtools/find_equiv_posns.m, + KPMtools/hash_add.m, KPMtools/hash_del.m, KPMtools/hash_lookup.m, + KPMtools/hungarian.m, KPMtools/image_rgb.m, + KPMtools/imresizeAspect.m, KPMtools/ind2subv.c, + KPMtools/ind2subv.m, KPMtools/installC_KPMtools.m, + KPMtools/is_psd.m, KPMtools/is_stochastic.m, + KPMtools/isemptycell.m, KPMtools/isposdef.m, KPMtools/isscalar.m, + KPMtools/isvector.m, KPMtools/junk.c, KPMtools/loadcell.m, + KPMtools/logb.m, KPMtools/logdet.m, KPMtools/logsum.m, + KPMtools/logsum_simple.m, KPMtools/logsum_test.m, + KPMtools/logsumexp.m, KPMtools/logsumexpv.m, + KPMtools/marg_table.m, KPMtools/marginalize_table.m, + KPMtools/matprint.m, KPMtools/max_mult.c, KPMtools/max_mult.m, + KPMtools/mexutil.c, KPMtools/mexutil.h, + KPMtools/mk_multi_index.m, KPMtools/mk_stochastic.m, + KPMtools/mult_by_table.m, KPMtools/myintersect.m, + KPMtools/myismember.m, KPMtools/myones.m, KPMtools/myplot.m, + KPMtools/myrand.m, KPMtools/myrepmat.m, KPMtools/myreshape.m, + KPMtools/mysetdiff.m, KPMtools/mysize.m, KPMtools/mysubset.m, + KPMtools/mysymsetdiff.m, KPMtools/bipartiteMatchingHungarian.m, + KPMtools/myunion.m, KPMtools/nchoose2.m, KPMtools/ncols.m, + KPMtools/nonmaxsup.m, KPMtools/normalise.m, + 
KPMtools/normaliseC.c, KPMtools/normaliseC.dll, + KPMtools/normalize.m, KPMtools/nrows.m, KPMtools/num2strcell.m, + KPMtools/partitionData.m, KPMtools/partition_matrix_vec.m, + KPMtools/pca_netlab.m, KPMtools/pick.m, KPMtools/plotROC.m, + KPMtools/plotROCkpm.m, KPMtools/plot_axis_thru_origin.m, + KPMtools/plot_ellipse.m, KPMtools/plot_matrix.m, + KPMtools/plot_polygon.m, KPMtools/plotcov2.m, + KPMtools/plotcov3.m, KPMtools/plotgauss1d.m, + KPMtools/plotgauss2d.m, KPMtools/plotgauss2d_old.m, + KPMtools/polygon_area.m, KPMtools/polygon_centroid.m, + KPMtools/polygon_intersect.m, KPMtools/previewfig.m, + KPMtools/process_options.m, KPMtools/rand_psd.m, + KPMtools/rectintC.m, KPMtools/rectintLoopC.mexglx, + KPMtools/rectintSparse.m, KPMtools/rectintSparseC.m, + KPMtools/rectintSparseLoopC.c, KPMtools/rectintSparseLoopC.dll, + KPMtools/repmatC.c, KPMtools/repmatC.dll, + KPMtools/repmatC.mexglx, KPMtools/rgb2grayKPM.m, + KPMtools/rnd_partition.m, KPMtools/rotate_xlabel.m, + KPMtools/safeStr.m, KPMtools/sampleUniformInts.m, + KPMtools/sample_discrete.m, KPMtools/set_xtick_label.m, + KPMtools/set_xtick_label_demo.m, KPMtools/setdiag.m, + KPMtools/softeye.m, KPMtools/sort_evec.m, + KPMtools/splitLongSeqIntoManyShort.m, KPMtools/sprintf_intvec.m, + KPMtools/sqdist.m, KPMtools/strmatch_multi.m, + KPMtools/strmatch_substr.m, KPMtools/subplot2.m, + KPMtools/subplot3.m, KPMtools/subsets.m, KPMtools/subv2ind.c, + KPMtools/subv2ind.m, KPMtools/sumv.m, KPMtools/suptitle.m, + KPMtools/unaryEncoding.m, KPMtools/wrap.m, + KPMtools/xticklabel_rotate90.m, KPMtools/zipload.m, + KPMtools/zipsave.m: Initial import of code base from Kevin + Murphy. + +2005-04-25 19:29 yozhik + + * KPMstats/KLgauss.m, KPMstats/README.txt, KPMstats/beta_sample.m, + KPMstats/chisquared_histo.m, KPMstats/chisquared_prob.m, + KPMstats/chisquared_readme.txt, KPMstats/chisquared_table.m, + KPMstats/clg_Mstep.m, KPMstats/clg_Mstep_simple.m, + KPMstats/clg_prob.m, KPMstats/condGaussToJoint.m, + KPMstats/cond_indep_fisher_z.m, + KPMstats/condgaussTrainObserved.m, KPMstats/condgauss_sample.m, + KPMstats/convertBinaryLabels.m, KPMstats/cwr_demo.m, + KPMstats/cwr_em.m, KPMstats/cwr_predict.m, KPMstats/cwr_prob.m, + KPMstats/cwr_readme.txt, KPMstats/cwr_test.m, + KPMstats/dirichlet_sample.m, KPMstats/distchck.m, + KPMstats/eigdec.m, KPMstats/est_transmat.m, + KPMstats/fit_paritioned_model_testfn.m, + KPMstats/fit_partitioned_model.m, KPMstats/gamma_sample.m, + KPMstats/gaussian_prob.m, KPMstats/gaussian_sample.m, + KPMstats/linear_regression.m, KPMstats/logist2.m, + KPMstats/logist2Apply.m, KPMstats/logist2ApplyRegularized.m, + KPMstats/logist2Fit.m, KPMstats/logist2FitRegularized.m, + KPMstats/logistK.m, KPMstats/logistK_eval.m, + KPMstats/marginalize_gaussian.m, KPMstats/matrix_T_pdf.m, + KPMstats/matrix_normal_pdf.m, KPMstats/mc_stat_distrib.m, + KPMstats/mixgauss_Mstep.m, KPMstats/mixgauss_classifier_apply.m, + KPMstats/mixgauss_classifier_train.m, KPMstats/mixgauss_em.m, + KPMstats/mixgauss_init.m, KPMstats/mixgauss_prob.m, + KPMstats/mixgauss_prob_test.m, KPMstats/mixgauss_sample.m, + KPMstats/mkPolyFvec.m, KPMstats/mk_unit_norm.m, + KPMstats/multinomial_prob.m, KPMstats/multinomial_sample.m, + KPMstats/normal_coef.m, KPMstats/partial_corr_coef.m, + KPMstats/parzen.m, KPMstats/parzenC.c, KPMstats/parzenC.dll, + KPMstats/parzenC.mexglx, KPMstats/parzenC_test.m, + KPMstats/parzen_fit_select_unif.m, KPMstats/pca.m, + KPMstats/rndcheck.m, KPMstats/sample.m, + KPMstats/sample_discrete.m, KPMstats/sample_gaussian.m, + 
KPMstats/student_t_logprob.m, KPMstats/student_t_prob.m, + KPMstats/unif_discrete_sample.m, KPMstats/weightedRegression.m, + KPMtools/README.txt, KPMtools/approx_unique.m, + KPMtools/approxeq.m, KPMtools/argmax.m, KPMtools/argmin.m, + KPMtools/assert.m, KPMtools/assignEdgeNums.m, + KPMtools/assign_cols.m, KPMtools/axis_pct.m, KPMtools/block.m, + KPMtools/cell2num.m, KPMtools/chi2inv.m, KPMtools/choose.m, + KPMtools/collapse_mog.m, KPMtools/colmult.c, + KPMtools/colmult.mexglx, KPMtools/computeROC.m, + KPMtools/compute_counts.m, KPMtools/conf2mahal.m, + KPMtools/cross_entropy.m, KPMtools/div.m, KPMtools/draw_circle.m, + KPMtools/draw_ellipse.m, KPMtools/draw_ellipse_axes.m, + KPMtools/em_converged.m, KPMtools/entropy.m, + KPMtools/exportfig.m, KPMtools/extend_domain_table.m, + KPMtools/factorial.m, KPMtools/find_equiv_posns.m, + KPMtools/hash_add.m, KPMtools/hash_del.m, KPMtools/hash_lookup.m, + KPMtools/hungarian.m, KPMtools/image_rgb.m, + KPMtools/imresizeAspect.m, KPMtools/ind2subv.c, + KPMtools/ind2subv.m, KPMtools/installC_KPMtools.m, + KPMtools/is_psd.m, KPMtools/is_stochastic.m, + KPMtools/isemptycell.m, KPMtools/isposdef.m, KPMtools/isscalar.m, + KPMtools/isvector.m, KPMtools/junk.c, KPMtools/loadcell.m, + KPMtools/logb.m, KPMtools/logdet.m, KPMtools/logsum.m, + KPMtools/logsum_simple.m, KPMtools/logsum_test.m, + KPMtools/logsumexp.m, KPMtools/logsumexpv.m, + KPMtools/marg_table.m, KPMtools/marginalize_table.m, + KPMtools/matprint.m, KPMtools/max_mult.c, KPMtools/max_mult.m, + KPMtools/mexutil.c, KPMtools/mexutil.h, + KPMtools/mk_multi_index.m, KPMtools/mk_stochastic.m, + KPMtools/mult_by_table.m, KPMtools/myintersect.m, + KPMtools/myismember.m, KPMtools/myones.m, KPMtools/myplot.m, + KPMtools/myrand.m, KPMtools/myrepmat.m, KPMtools/myreshape.m, + KPMtools/mysetdiff.m, KPMtools/mysize.m, KPMtools/mysubset.m, + KPMtools/mysymsetdiff.m, KPMtools/bipartiteMatchingHungarian.m, + KPMtools/myunion.m, KPMtools/nchoose2.m, KPMtools/ncols.m, + KPMtools/nonmaxsup.m, KPMtools/normalise.m, + KPMtools/normaliseC.c, KPMtools/normaliseC.dll, + KPMtools/normalize.m, KPMtools/nrows.m, KPMtools/num2strcell.m, + KPMtools/partitionData.m, KPMtools/partition_matrix_vec.m, + KPMtools/pca_netlab.m, KPMtools/pick.m, KPMtools/plotROC.m, + KPMtools/plotROCkpm.m, KPMtools/plot_axis_thru_origin.m, + KPMtools/plot_ellipse.m, KPMtools/plot_matrix.m, + KPMtools/plot_polygon.m, KPMtools/plotcov2.m, + KPMtools/plotcov3.m, KPMtools/plotgauss1d.m, + KPMtools/plotgauss2d.m, KPMtools/plotgauss2d_old.m, + KPMtools/polygon_area.m, KPMtools/polygon_centroid.m, + KPMtools/polygon_intersect.m, KPMtools/previewfig.m, + KPMtools/process_options.m, KPMtools/rand_psd.m, + KPMtools/rectintC.m, KPMtools/rectintLoopC.mexglx, + KPMtools/rectintSparse.m, KPMtools/rectintSparseC.m, + KPMtools/rectintSparseLoopC.c, KPMtools/rectintSparseLoopC.dll, + KPMtools/repmatC.c, KPMtools/repmatC.dll, + KPMtools/repmatC.mexglx, KPMtools/rgb2grayKPM.m, + KPMtools/rnd_partition.m, KPMtools/rotate_xlabel.m, + KPMtools/safeStr.m, KPMtools/sampleUniformInts.m, + KPMtools/sample_discrete.m, KPMtools/set_xtick_label.m, + KPMtools/set_xtick_label_demo.m, KPMtools/setdiag.m, + KPMtools/softeye.m, KPMtools/sort_evec.m, + KPMtools/splitLongSeqIntoManyShort.m, KPMtools/sprintf_intvec.m, + KPMtools/sqdist.m, KPMtools/strmatch_multi.m, + KPMtools/strmatch_substr.m, KPMtools/subplot2.m, + KPMtools/subplot3.m, KPMtools/subsets.m, KPMtools/subv2ind.c, + KPMtools/subv2ind.m, KPMtools/sumv.m, KPMtools/suptitle.m, + KPMtools/unaryEncoding.m, KPMtools/wrap.m, + 
KPMtools/xticklabel_rotate90.m, KPMtools/zipload.m, + KPMtools/zipsave.m: Initial revision + +2005-04-03 18:39 yozhik + + * BNT/learning/score_dags.m: Initial import of code base from Kevin + Murphy. + +2005-04-03 18:39 yozhik + + * BNT/learning/score_dags.m: Initial revision + +2005-03-31 11:20 yozhik + + * BNT/installC_BNT.m: Initial import of code base from Kevin + Murphy. + +2005-03-31 11:20 yozhik + + * BNT/installC_BNT.m: Initial revision + +2005-03-30 11:59 yozhik + + * KPMtools/asdemo.html: Initial import of code base from Kevin + Murphy. + +2005-03-30 11:59 yozhik + + * KPMtools/asdemo.html: Initial revision + +2005-03-26 18:51 yozhik + + * KPMtools/asdemo.m: Initial import of code base from Kevin Murphy. + +2005-03-26 18:51 yozhik + + * KPMtools/asdemo.m: Initial revision + +2005-01-15 18:27 yozhik + + * BNT/CPDs/@tabular_CPD/: get_field.m, set_fields.m, tabular_CPD.m: + Initial import of code base from Kevin Murphy. + +2005-01-15 18:27 yozhik + + * BNT/CPDs/@tabular_CPD/: get_field.m, set_fields.m, tabular_CPD.m: + Initial revision + +2004-11-24 12:12 yozhik + + * BNT/inference/dynamic/@cbk_inf_engine/junk: Initial import of + code base from Kevin Murphy. + +2004-11-24 12:12 yozhik + + * BNT/inference/dynamic/@cbk_inf_engine/junk: Initial revision + +2004-11-22 14:41 yozhik + + * BNT/examples/dynamic/orig_water1.m: Initial import of code base + from Kevin Murphy. + +2004-11-22 14:41 yozhik + + * BNT/examples/dynamic/orig_water1.m: Initial revision + +2004-11-22 14:15 yozhik + + * BNT/inference/dynamic/@cbk_inf_engine/cbk_inf_engine.m: Initial + import of code base from Kevin Murphy. + +2004-11-22 14:15 yozhik + + * BNT/inference/dynamic/@cbk_inf_engine/cbk_inf_engine.m: Initial + revision + +2004-11-06 13:52 yozhik + + * BNT/examples/static/StructLearn/model_select2.m: Initial import + of code base from Kevin Murphy. + +2004-11-06 13:52 yozhik + + * BNT/examples/static/StructLearn/model_select2.m: Initial revision + +2004-11-06 12:55 yozhik + + * BNT/examples/static/StructLearn/model_select1.m: Initial import + of code base from Kevin Murphy. + +2004-11-06 12:55 yozhik + + * BNT/examples/static/StructLearn/model_select1.m: Initial revision + +2004-10-22 18:18 yozhik + + * HMM/viterbi_path.m: Initial import of code base from Kevin + Murphy. + +2004-10-22 18:18 yozhik + + * HMM/viterbi_path.m: Initial revision + +2004-09-29 20:09 yozhik + + * BNT/inference/static/@var_elim_inf_engine/marginal_nodes.m: + Initial import of code base from Kevin Murphy. + +2004-09-29 20:09 yozhik + + * BNT/inference/static/@var_elim_inf_engine/marginal_nodes.m: + Initial revision + +2004-09-12 20:21 yozhik + + * BNT/examples/limids/amnio.m: Initial import of code base from + Kevin Murphy. + +2004-09-12 20:21 yozhik + + * BNT/examples/limids/amnio.m: Initial revision + +2004-09-12 19:27 yozhik + + * BNT/examples/limids/oil1.m: Initial import of code base from + Kevin Murphy. + +2004-09-12 19:27 yozhik + + * BNT/examples/limids/oil1.m: Initial revision + +2004-09-12 14:01 yozhik + + * BNT/examples/static/sprinkler1.m: Initial import of code base + from Kevin Murphy. + +2004-09-12 14:01 yozhik + + * BNT/examples/static/sprinkler1.m: Initial revision + +2004-08-29 05:41 yozhik + + * HMM/transmat_train_observed.m: Initial import of code base from + Kevin Murphy. + +2004-08-29 05:41 yozhik + + * HMM/transmat_train_observed.m: Initial revision + +2004-08-05 08:25 yozhik + + * BNT/potentials/: @dpot/divide_by_pot.m, Tables/divide_by_table.m: + Initial import of code base from Kevin Murphy. 
+ +2004-08-05 08:25 yozhik + + * BNT/potentials/: @dpot/divide_by_pot.m, Tables/divide_by_table.m: + Initial revision + +2004-08-04 12:59 yozhik + + * BNT/potentials/@dpot/: marginalize_pot.m, multiply_by_pot.m: + Initial import of code base from Kevin Murphy. + +2004-08-04 12:59 yozhik + + * BNT/potentials/@dpot/: marginalize_pot.m, multiply_by_pot.m: + Initial revision + +2004-08-04 12:36 yozhik + + * BNT/@assocarray/subsref.m: Initial import of code base from Kevin + Murphy. + +2004-08-04 12:36 yozhik + + * BNT/@assocarray/subsref.m: Initial revision + +2004-08-04 08:54 yozhik + + * BNT/potentials/@dpot/normalize_pot.m: Initial import of code base + from Kevin Murphy. + +2004-08-04 08:54 yozhik + + * BNT/potentials/@dpot/normalize_pot.m: Initial revision + +2004-08-04 08:51 yozhik + + * BNT/potentials/Tables/: marg_table.m, mult_by_table.m, + extend_domain_table.m: Initial import of code base from Kevin + Murphy. + +2004-08-04 08:51 yozhik + + * BNT/potentials/Tables/: marg_table.m, mult_by_table.m, + extend_domain_table.m: Initial revision + +2004-08-02 15:23 yozhik + + * BNT/CPDs/@noisyor_CPD/CPD_to_CPT.m: Initial import of code base + from Kevin Murphy. + +2004-08-02 15:23 yozhik + + * BNT/CPDs/@noisyor_CPD/CPD_to_CPT.m: Initial revision + +2004-08-02 15:05 yozhik + + * BNT/general/noisyORtoTable.m: Initial import of code base from + Kevin Murphy. + +2004-08-02 15:05 yozhik + + * BNT/general/noisyORtoTable.m: Initial revision + +2004-06-29 10:46 yozhik + + * BNT/learning/learn_struct_pdag_pc.m: Initial import of code base + from Kevin Murphy. + +2004-06-29 10:46 yozhik + + * BNT/learning/learn_struct_pdag_pc.m: Initial revision + +2004-06-15 10:50 yozhik + + * GraphViz/graph_to_dot.m: Initial import of code base from Kevin + Murphy. + +2004-06-15 10:50 yozhik + + * GraphViz/graph_to_dot.m: Initial revision + +2004-06-11 14:16 yozhik + + * BNT/CPDs/@tabular_CPD/log_marg_prob_node.m: Initial import of + code base from Kevin Murphy. + +2004-06-11 14:16 yozhik + + * BNT/CPDs/@tabular_CPD/log_marg_prob_node.m: Initial revision + +2004-06-09 18:56 yozhik + + * BNT/README.txt: Initial import of code base from Kevin Murphy. + +2004-06-09 18:56 yozhik + + * BNT/README.txt: Initial revision + +2004-06-09 18:53 yozhik + + * BNT/CPDs/@generic_CPD/learn_params.m: Initial import of code base + from Kevin Murphy. + +2004-06-09 18:53 yozhik + + * BNT/CPDs/@generic_CPD/learn_params.m: Initial revision + +2004-06-09 18:42 yozhik + + * BNT/examples/static/nodeorderExample.m: Initial import of code + base from Kevin Murphy. + +2004-06-09 18:42 yozhik + + * BNT/examples/static/nodeorderExample.m: Initial revision + +2004-06-09 18:33 yozhik + + * BNT/: learning/score_family.m, test_BNT.m: Initial import of code + base from Kevin Murphy. + +2004-06-09 18:33 yozhik + + * BNT/: learning/score_family.m, test_BNT.m: Initial revision + +2004-06-09 18:28 yozhik + + * BNT/: learning/learn_params.m, CPDs/@gaussian_CPD/learn_params.m, + examples/static/gaussian2.m: Initial import of code base from + Kevin Murphy. + +2004-06-09 18:28 yozhik + + * BNT/: learning/learn_params.m, CPDs/@gaussian_CPD/learn_params.m, + examples/static/gaussian2.m: Initial revision + +2004-06-09 18:25 yozhik + + * BNT/CPDs/@tabular_CPD/learn_params.m: Initial import of code base + from Kevin Murphy. + +2004-06-09 18:25 yozhik + + * BNT/CPDs/@tabular_CPD/learn_params.m: Initial revision + +2004-06-09 18:17 yozhik + + * BNT/general/sample_bnet.m: Initial import of code base from Kevin + Murphy. 
+ +2004-06-09 18:17 yozhik + + * BNT/general/sample_bnet.m: Initial revision + +2004-06-07 12:45 yozhik + + * BNT/examples/static/discrete1.m: Initial import of code base from + Kevin Murphy. + +2004-06-07 12:45 yozhik + + * BNT/examples/static/discrete1.m: Initial revision + +2004-06-07 12:04 yozhik + + * BNT/: inference/static/@global_joint_inf_engine/marginal_nodes.m, + inference/static/@global_joint_inf_engine/enter_evidence.m, + examples/dynamic/mk_bat_dbn.m: Initial import of code base from + Kevin Murphy. + +2004-06-07 12:04 yozhik + + * BNT/: inference/static/@global_joint_inf_engine/marginal_nodes.m, + inference/static/@global_joint_inf_engine/enter_evidence.m, + examples/dynamic/mk_bat_dbn.m: Initial revision + +2004-06-07 08:53 yozhik + + * BNT/examples/limids/asia_dt1.m: Initial import of code base from + Kevin Murphy. + +2004-06-07 08:53 yozhik + + * BNT/examples/limids/asia_dt1.m: Initial revision + +2004-06-07 08:48 yozhik + + * BNT/general/: solve_limid.m, compute_joint_pot.m: Initial import + of code base from Kevin Murphy. + +2004-06-07 08:48 yozhik + + * BNT/general/: solve_limid.m, compute_joint_pot.m: Initial + revision + +2004-06-07 07:39 yozhik + + * Kalman/README.txt: Initial import of code base from Kevin Murphy. + +2004-06-07 07:39 yozhik + + * Kalman/README.txt: Initial revision + +2004-06-07 07:33 yozhik + + * GraphViz/README.txt: Initial import of code base from Kevin + Murphy. + +2004-06-07 07:33 yozhik + + * GraphViz/README.txt: Initial revision + +2004-05-31 15:19 yozhik + + * HMM/dhmm_sample.m: Initial import of code base from Kevin Murphy. + +2004-05-31 15:19 yozhik + + * HMM/dhmm_sample.m: Initial revision + +2004-05-25 17:32 yozhik + + * HMM/mhmm_sample.m: Initial import of code base from Kevin Murphy. + +2004-05-25 17:32 yozhik + + * HMM/mhmm_sample.m: Initial revision + +2004-05-24 15:26 yozhik + + * HMM/mc_sample.m: Initial import of code base from Kevin Murphy. + +2004-05-24 15:26 yozhik + + * HMM/mc_sample.m: Initial revision + +2004-05-18 07:50 yozhik + + * BNT/installC_graph.m: Initial import of code base from Kevin + Murphy. + +2004-05-18 07:50 yozhik + + * BNT/installC_graph.m: Initial revision + +2004-05-13 18:13 yozhik + + * BNT/inference/static/@gaussian_inf_engine/gaussian_inf_engine.m: + Initial import of code base from Kevin Murphy. + +2004-05-13 18:13 yozhik + + * BNT/inference/static/@gaussian_inf_engine/gaussian_inf_engine.m: + Initial revision + +2004-05-11 12:23 yozhik + + * BNT/examples/dynamic/mk_chmm.m: Initial import of code base from + Kevin Murphy. + +2004-05-11 12:23 yozhik + + * BNT/examples/dynamic/mk_chmm.m: Initial revision + +2004-05-11 11:45 yozhik + + * BNT/examples/dynamic/mk_water_dbn.m: Initial import of code base + from Kevin Murphy. + +2004-05-11 11:45 yozhik + + * BNT/examples/dynamic/mk_water_dbn.m: Initial revision + +2004-05-05 06:32 yozhik + + * GraphViz/draw_dot.m: Initial import of code base from Kevin + Murphy. + +2004-05-05 06:32 yozhik + + * GraphViz/draw_dot.m: Initial revision + +2004-03-30 09:18 yozhik + + * BNT/: general/mk_named_CPT.m, + CPDs/@softmax_CPD/convert_to_table.m: Initial import of code base + from Kevin Murphy. + +2004-03-30 09:18 yozhik + + * BNT/: general/mk_named_CPT.m, + CPDs/@softmax_CPD/convert_to_table.m: Initial revision + +2004-03-22 14:32 yozhik + + * GraphViz/draw_graph.m: Initial import of code base from Kevin + Murphy. 
+ +2004-03-22 14:32 yozhik + + * GraphViz/draw_graph.m: Initial revision + +2004-03-12 15:21 yozhik + + * GraphViz/dot_to_graph.m: Initial import of code base from Kevin + Murphy. + +2004-03-12 15:21 yozhik + + * GraphViz/dot_to_graph.m: Initial revision + +2004-03-04 14:34 yozhik + + * BNT/examples/static/burglary.m: Initial import of code base from + Kevin Murphy. + +2004-03-04 14:34 yozhik + + * BNT/examples/static/burglary.m: Initial revision + +2004-03-04 14:27 yozhik + + * BNT/examples/static/burglar-alarm-net.lisp.txt: Initial import of + code base from Kevin Murphy. + +2004-03-04 14:27 yozhik + + * BNT/examples/static/burglar-alarm-net.lisp.txt: Initial revision + +2004-02-28 09:25 yozhik + + * BNT/examples/static/learn1.m: Initial import of code base from + Kevin Murphy. + +2004-02-28 09:25 yozhik + + * BNT/examples/static/learn1.m: Initial revision + +2004-02-22 11:43 yozhik + + * BNT/examples/static/brainy.m: Initial import of code base from + Kevin Murphy. + +2004-02-22 11:43 yozhik + + * BNT/examples/static/brainy.m: Initial revision + +2004-02-20 14:00 yozhik + + * BNT/CPDs/@discrete_CPD/convert_to_pot.m: Initial import of code + base from Kevin Murphy. + +2004-02-20 14:00 yozhik + + * BNT/CPDs/@discrete_CPD/convert_to_pot.m: Initial revision + +2004-02-18 17:12 yozhik + + * + BNT/inference/dynamic/@jtree_dbn_inf_engine/enter_soft_evidence.m: + Initial import of code base from Kevin Murphy. + +2004-02-18 17:12 yozhik + + * + BNT/inference/dynamic/@jtree_dbn_inf_engine/enter_soft_evidence.m: + Initial revision + +2004-02-13 18:06 yozhik + + * HMM/mhmmParzen_train_observed.m: Initial import of code base from + Kevin Murphy. + +2004-02-13 18:06 yozhik + + * HMM/mhmmParzen_train_observed.m: Initial revision + +2004-02-12 15:08 yozhik + + * HMM/gausshmm_train_observed.m: Initial import of code base from + Kevin Murphy. + +2004-02-12 15:08 yozhik + + * HMM/gausshmm_train_observed.m: Initial revision + +2004-02-12 04:57 yozhik + + * BNT/examples/static/HME/hmemenu.m: Initial import of code base + from Kevin Murphy. + +2004-02-12 04:57 yozhik + + * BNT/examples/static/HME/hmemenu.m: Initial revision + +2004-02-07 20:52 yozhik + + * HMM/mhmm_em.m: Initial import of code base from Kevin Murphy. + +2004-02-07 20:52 yozhik + + * HMM/mhmm_em.m: Initial revision + +2004-02-04 15:53 yozhik + + * BNT/examples/dynamic/mk_orig_bat_dbn.m: Initial import of code + base from Kevin Murphy. + +2004-02-04 15:53 yozhik + + * BNT/examples/dynamic/mk_orig_bat_dbn.m: Initial revision + +2004-02-03 23:42 yozhik + + * BNT/inference/dynamic/@cbk_inf_engine/enter_soft_evidence.m: + Initial import of code base from Kevin Murphy. + +2004-02-03 23:42 yozhik + + * BNT/inference/dynamic/@cbk_inf_engine/enter_soft_evidence.m: + Initial revision + +2004-02-03 09:15 yozhik + + * GraphViz/Old/graphToDot.m: Initial import of code base from Kevin + Murphy. + +2004-02-03 09:15 yozhik + + * GraphViz/Old/graphToDot.m: Initial revision + +2004-01-30 18:57 yozhik + + * BNT/examples/dynamic/mk_orig_water_dbn.m: Initial import of code + base from Kevin Murphy. + +2004-01-30 18:57 yozhik + + * BNT/examples/dynamic/mk_orig_water_dbn.m: Initial revision + +2004-01-27 13:08 yozhik + + * GraphViz/: my_call.m, editGraphGUI.m: Initial import of code base + from Kevin Murphy. + +2004-01-27 13:08 yozhik + + * GraphViz/: my_call.m, editGraphGUI.m: Initial revision + +2004-01-27 13:01 yozhik + + * GraphViz/Old/: dot_to_graph.m, draw_graph.m: Initial import of + code base from Kevin Murphy. 
+ +2004-01-27 13:01 yozhik + + * GraphViz/Old/: dot_to_graph.m, draw_graph.m: Initial revision + +2004-01-27 12:47 yozhik + + * GraphViz/Old/pre_pesha_graph_to_dot.m: Initial import of code + base from Kevin Murphy. + +2004-01-27 12:47 yozhik + + * GraphViz/Old/pre_pesha_graph_to_dot.m: Initial revision + +2004-01-27 12:42 yozhik + + * GraphViz/Old/draw_dot.m: Initial import of code base from Kevin + Murphy. + +2004-01-27 12:42 yozhik + + * GraphViz/Old/draw_dot.m: Initial revision + +2004-01-14 17:06 yozhik + + * BNT/examples/static/Models/mk_hmm_bnet.m: Initial import of code + base from Kevin Murphy. + +2004-01-14 17:06 yozhik + + * BNT/examples/static/Models/mk_hmm_bnet.m: Initial revision + +2004-01-12 12:53 yozhik + + * BNT/inference/dynamic/@cbk_inf_engine/enter_evidence.m: Initial + import of code base from Kevin Murphy. + +2004-01-12 12:53 yozhik + + * BNT/inference/dynamic/@cbk_inf_engine/enter_evidence.m: Initial + revision + +2004-01-04 17:23 yozhik + + * BNT/inference/static/@belprop_mrf2_inf_engine/bp_mrf2.m: Initial + import of code base from Kevin Murphy. + +2004-01-04 17:23 yozhik + + * BNT/inference/static/@belprop_mrf2_inf_engine/bp_mrf2.m: Initial + revision + +2003-12-15 22:17 yozhik + + * BNT/inference/dynamic/@cbk_inf_engine/marginal_nodes.m: Initial + import of code base from Kevin Murphy. + +2003-12-15 22:17 yozhik + + * BNT/inference/dynamic/@cbk_inf_engine/marginal_nodes.m: Initial + revision + +2003-10-31 14:37 yozhik + + * BNT/inference/static/@jtree_inf_engine/jtree_inf_engine.m: + Initial import of code base from Kevin Murphy. + +2003-10-31 14:37 yozhik + + * BNT/inference/static/@jtree_inf_engine/jtree_inf_engine.m: + Initial revision + +2003-09-05 07:06 yozhik + + * BNT/learning/learn_struct_mcmc.m: Initial import of code base + from Kevin Murphy. + +2003-09-05 07:06 yozhik + + * BNT/learning/learn_struct_mcmc.m: Initial revision + +2003-08-18 14:50 yozhik + + * BNT/learning/learn_params_dbn_em.m: Initial import of code base + from Kevin Murphy. + +2003-08-18 14:50 yozhik + + * BNT/learning/learn_params_dbn_em.m: Initial revision + +2003-07-30 06:37 yozhik + + * BNT/potentials/: @mpot/set_domain_pot.m, + @cgpot/Old/set_domain_pot.m, @cgpot/set_domain_pot.m: Initial + import of code base from Kevin Murphy. + +2003-07-30 06:37 yozhik + + * BNT/potentials/: @mpot/set_domain_pot.m, + @cgpot/Old/set_domain_pot.m, @cgpot/set_domain_pot.m: Initial + revision + +2003-07-28 19:44 yozhik + + * BNT/inference/dynamic/@cbk_inf_engine/: dbn_init_bel.m, + dbn_marginal_from_bel.m, dbn_update_bel.m, dbn_update_bel1.m, + marginal_family.m, update_engine.m: Initial import of code base + from Kevin Murphy. + +2003-07-28 19:44 yozhik + + * BNT/inference/dynamic/@cbk_inf_engine/: dbn_init_bel.m, + dbn_marginal_from_bel.m, dbn_update_bel.m, dbn_update_bel1.m, + marginal_family.m, update_engine.m: Initial revision + +2003-07-28 15:44 yozhik + + * GraphViz/: approxeq.m, process_options.m: Initial import of code + base from Kevin Murphy. + +2003-07-28 15:44 yozhik + + * GraphViz/: approxeq.m, process_options.m: Initial revision + +2003-07-24 06:41 yozhik + + * BNT/CPDs/@hhmmQ_CPD/update_ess.m: Initial import of code base + from Kevin Murphy. + +2003-07-24 06:41 yozhik + + * BNT/CPDs/@hhmmQ_CPD/update_ess.m: Initial revision + +2003-07-22 15:55 yozhik + + * BNT/CPDs/@gaussian_CPD/update_ess.m: Initial import of code base + from Kevin Murphy. 
+ +2003-07-22 15:55 yozhik + + * BNT/CPDs/@gaussian_CPD/update_ess.m: Initial revision + +2003-07-06 13:57 yozhik + + * BNT/inference/static/@pearl_inf_engine/bethe_free_energy.m: + Initial import of code base from Kevin Murphy. + +2003-07-06 13:57 yozhik + + * BNT/inference/static/@pearl_inf_engine/bethe_free_energy.m: + Initial revision + +2003-05-21 06:49 yozhik + + * BNT/potentials/@scgpot/: complement_pot.m, normalize_pot.m, + recursive_combine_pots.m: Initial import of code base from Kevin + Murphy. + +2003-05-21 06:49 yozhik + + * BNT/potentials/@scgpot/: complement_pot.m, normalize_pot.m, + recursive_combine_pots.m: Initial revision + +2003-05-20 07:10 yozhik + + * BNT/CPDs/@gaussian_CPD/maximize_params.m: Initial import of code + base from Kevin Murphy. + +2003-05-20 07:10 yozhik + + * BNT/CPDs/@gaussian_CPD/maximize_params.m: Initial revision + +2003-05-13 09:11 yozhik + + * HMM/mhmm_em_demo.m: Initial import of code base from Kevin + Murphy. + +2003-05-13 09:11 yozhik + + * HMM/mhmm_em_demo.m: Initial revision + +2003-05-13 07:35 yozhik + + * BNT/examples/dynamic/viterbi1.m: Initial import of code base from + Kevin Murphy. + +2003-05-13 07:35 yozhik + + * BNT/examples/dynamic/viterbi1.m: Initial revision + +2003-05-11 16:31 yozhik + + * BNT/CPDs/@gaussian_CPD/convert_to_table.m: Initial import of code + base from Kevin Murphy. + +2003-05-11 16:31 yozhik + + * BNT/CPDs/@gaussian_CPD/convert_to_table.m: Initial revision + +2003-05-11 16:13 yozhik + + * BNT/CPDs/@gaussian_CPD/gaussian_CPD_params_given_dps.m: Initial + import of code base from Kevin Murphy. + +2003-05-11 16:13 yozhik + + * BNT/CPDs/@gaussian_CPD/gaussian_CPD_params_given_dps.m: Initial + revision + +2003-05-11 08:39 yozhik + + * BNT/inference/static/@stab_cond_gauss_inf_engine/README: Initial + import of code base from Kevin Murphy. + +2003-05-11 08:39 yozhik + + * BNT/inference/static/@stab_cond_gauss_inf_engine/README: Initial + revision + +2003-05-04 15:31 yozhik + + * BNT/uninstallC_BNT.m: Initial import of code base from Kevin + Murphy. + +2003-05-04 15:31 yozhik + + * BNT/uninstallC_BNT.m: Initial revision + +2003-05-04 15:23 yozhik + + * BNT/examples/dynamic/: dhmm1.m, ghmm1.m, mhmm1.m: Initial import + of code base from Kevin Murphy. + +2003-05-04 15:23 yozhik + + * BNT/examples/dynamic/: dhmm1.m, ghmm1.m, mhmm1.m: Initial + revision + +2003-05-04 15:11 yozhik + + * HMM/mhmm_logprob.m: Initial import of code base from Kevin + Murphy. + +2003-05-04 15:11 yozhik + + * HMM/mhmm_logprob.m: Initial revision + +2003-05-04 15:01 yozhik + + * HMM/: dhmm_logprob.m, dhmm_em_online.m, dhmm_em_online_demo.m: + Initial import of code base from Kevin Murphy. + +2003-05-04 15:01 yozhik + + * HMM/: dhmm_logprob.m, dhmm_em_online.m, dhmm_em_online_demo.m: + Initial revision + +2003-05-04 14:58 yozhik + + * HMM/: pomdp_sample.m, dhmm_sample_endstate.m, dhmm_em_demo.m: + Initial import of code base from Kevin Murphy. + +2003-05-04 14:58 yozhik + + * HMM/: pomdp_sample.m, dhmm_sample_endstate.m, dhmm_em_demo.m: + Initial revision + +2003-05-04 14:47 yozhik + + * + BNT/inference/online/@hmm_2TBN_inf_engine/private/mk_hmm_obs_lik_vec.m: + Initial import of code base from Kevin Murphy. + +2003-05-04 14:47 yozhik + + * + BNT/inference/online/@hmm_2TBN_inf_engine/private/mk_hmm_obs_lik_vec.m: + Initial revision + +2003-05-04 14:42 yozhik + + * + BNT/inference/dynamic/@hmm_inf_engine/private/mk_hmm_obs_lik_matrix.m: + Initial import of code base from Kevin Murphy. 
+ +2003-05-04 14:42 yozhik + + * + BNT/inference/dynamic/@hmm_inf_engine/private/mk_hmm_obs_lik_matrix.m: + Initial revision + +2003-04-22 14:00 yozhik + + * BNT/CPDs/@tabular_CPD/display.m: Initial import of code base from + Kevin Murphy. + +2003-04-22 14:00 yozhik + + * BNT/CPDs/@tabular_CPD/display.m: Initial revision + +2003-03-28 09:22 yozhik + + * BNT/examples/dynamic/ho1.m: Initial import of code base from + Kevin Murphy. + +2003-03-28 09:22 yozhik + + * BNT/examples/dynamic/ho1.m: Initial revision + +2003-03-28 09:12 yozhik + + * + BNT/inference/static/@stab_cond_gauss_inf_engine/stab_cond_gauss_inf_engine.m: + Initial import of code base from Kevin Murphy. + +2003-03-28 09:12 yozhik + + * + BNT/inference/static/@stab_cond_gauss_inf_engine/stab_cond_gauss_inf_engine.m: + Initial revision + +2003-03-28 08:35 yozhik + + * GraphViz/arrow.m: Initial import of code base from Kevin Murphy. + +2003-03-28 08:35 yozhik + + * GraphViz/arrow.m: Initial revision + +2003-03-25 16:06 yozhik + + * BNT/examples/static/Models/mk_asia_bnet.m: Initial import of code + base from Kevin Murphy. + +2003-03-25 16:06 yozhik + + * BNT/examples/static/Models/mk_asia_bnet.m: Initial revision + +2003-03-20 07:07 yozhik + + * BNT/potentials/@scgpot/README: Initial import of code base from + Kevin Murphy. + +2003-03-20 07:07 yozhik + + * BNT/potentials/@scgpot/README: Initial revision + +2003-03-14 01:45 yozhik + + * + BNT/inference/dynamic/@stable_ho_inf_engine/stable_ho_inf_engine.m: + Initial import of code base from Kevin Murphy. + +2003-03-14 01:45 yozhik + + * + BNT/inference/dynamic/@stable_ho_inf_engine/stable_ho_inf_engine.m: + Initial revision + +2003-03-12 02:38 yozhik + + * + BNT/inference/static/@stab_cond_gauss_inf_engine/enter_evidence.m: + Initial import of code base from Kevin Murphy. + +2003-03-12 02:38 yozhik + + * + BNT/inference/static/@stab_cond_gauss_inf_engine/enter_evidence.m: + Initial revision + +2003-03-11 10:07 yozhik + + * BNT/potentials/@scgpot/reduce_pot.m: Initial import of code base + from Kevin Murphy. + +2003-03-11 10:07 yozhik + + * BNT/potentials/@scgpot/reduce_pot.m: Initial revision + +2003-03-11 09:49 yozhik + + * BNT/potentials/@scgpot/combine_pots.m: Initial import of code + base from Kevin Murphy. + +2003-03-11 09:49 yozhik + + * BNT/potentials/@scgpot/combine_pots.m: Initial revision + +2003-03-11 09:37 yozhik + + * BNT/potentials/@scgcpot/reduce_pot.m: Initial import of code base + from Kevin Murphy. + +2003-03-11 09:37 yozhik + + * BNT/potentials/@scgcpot/reduce_pot.m: Initial revision + +2003-03-11 09:06 yozhik + + * BNT/potentials/@scgpot/marginalize_pot.m: Initial import of code + base from Kevin Murphy. + +2003-03-11 09:06 yozhik + + * BNT/potentials/@scgpot/marginalize_pot.m: Initial revision + +2003-03-11 06:04 yozhik + + * BNT/potentials/@scgpot/scgpot.m: Initial import of code base from + Kevin Murphy. + +2003-03-11 06:04 yozhik + + * BNT/potentials/@scgpot/scgpot.m: Initial revision + +2003-03-09 15:03 yozhik + + * BNT/CPDs/@gaussian_CPD/convert_to_pot.m: Initial import of code + base from Kevin Murphy. + +2003-03-09 15:03 yozhik + + * BNT/CPDs/@gaussian_CPD/convert_to_pot.m: Initial revision + +2003-03-09 14:44 yozhik + + * BNT/CPDs/@tabular_CPD/maximize_params.m: Initial import of code + base from Kevin Murphy. + +2003-03-09 14:44 yozhik + + * BNT/CPDs/@tabular_CPD/maximize_params.m: Initial revision + +2003-02-21 03:20 yozhik + + * + BNT/inference/static/@stab_cond_gauss_inf_engine/marginal_difclq_nodes.m: + Initial import of code base from Kevin Murphy. 
+ +2003-02-21 03:20 yozhik + + * + BNT/inference/static/@stab_cond_gauss_inf_engine/marginal_difclq_nodes.m: + Initial revision + +2003-02-21 03:13 yozhik + + * + BNT/inference/static/@stab_cond_gauss_inf_engine/marginal_nodes.m: + Initial import of code base from Kevin Murphy. + +2003-02-21 03:13 yozhik + + * + BNT/inference/static/@stab_cond_gauss_inf_engine/marginal_nodes.m: + Initial revision + +2003-02-19 01:52 yozhik + + * BNT/inference/dynamic/@stable_ho_inf_engine/: enter_evidence.m, + marginal_family.m, marginal_nodes.m, test_ho_inf_enginge.m, + update_engine.m: Initial import of code base from Kevin Murphy. + +2003-02-19 01:52 yozhik + + * BNT/inference/dynamic/@stable_ho_inf_engine/: enter_evidence.m, + marginal_family.m, marginal_nodes.m, test_ho_inf_enginge.m, + update_engine.m: Initial revision + +2003-02-10 07:38 yozhik + + * BNT/inference/static/@stab_cond_gauss_inf_engine/push.m: Initial + import of code base from Kevin Murphy. + +2003-02-10 07:38 yozhik + + * BNT/inference/static/@stab_cond_gauss_inf_engine/push.m: Initial + revision + +2003-02-06 18:25 yozhik + + * KPMtools/checkpsd.m: Initial import of code base from Kevin + Murphy. + +2003-02-06 18:25 yozhik + + * KPMtools/checkpsd.m: Initial revision + +2003-02-05 19:16 yozhik + + * GraphViz/draw_hmm.m: Initial import of code base from Kevin + Murphy. + +2003-02-05 19:16 yozhik + + * GraphViz/draw_hmm.m: Initial revision + +2003-02-01 16:23 yozhik + + * BNT/: general/dbn_to_hmm.m, learning/learn_params_dbn.m: Initial + import of code base from Kevin Murphy. + +2003-02-01 16:23 yozhik + + * BNT/: general/dbn_to_hmm.m, learning/learn_params_dbn.m: Initial + revision + +2003-02-01 11:42 yozhik + + * BNT/general/mk_dbn.m: Initial import of code base from Kevin + Murphy. + +2003-02-01 11:42 yozhik + + * BNT/general/mk_dbn.m: Initial revision + +2003-01-30 16:13 yozhik + + * BNT/CPDs/@gaussian_CPD/maximize_params_debug.m: Initial import of + code base from Kevin Murphy. + +2003-01-30 16:13 yozhik + + * BNT/CPDs/@gaussian_CPD/maximize_params_debug.m: Initial revision + +2003-01-30 14:38 yozhik + + * BNT/CPDs/@gaussian_CPD/Old/maximize_params.m: Initial import of + code base from Kevin Murphy. + +2003-01-30 14:38 yozhik + + * BNT/CPDs/@gaussian_CPD/Old/maximize_params.m: Initial revision + +2003-01-29 03:23 yozhik + + * + BNT/inference/static/@stab_cond_gauss_inf_engine/marginal_singleclq_nodes.m: + Initial import of code base from Kevin Murphy. + +2003-01-29 03:23 yozhik + + * + BNT/inference/static/@stab_cond_gauss_inf_engine/marginal_singleclq_nodes.m: + Initial revision + +2003-01-24 11:36 yozhik + + * Kalman/sample_lds.m: Initial import of code base from Kevin + Murphy. + +2003-01-24 11:36 yozhik + + * Kalman/sample_lds.m: Initial revision + +2003-01-24 04:52 yozhik + + * BNT/potentials/@scgpot/extension_pot.m: Initial import of code + base from Kevin Murphy. + +2003-01-24 04:52 yozhik + + * BNT/potentials/@scgpot/extension_pot.m: Initial revision + +2003-01-23 10:49 yozhik + + * BNT/: general/convert_dbn_CPDs_to_tables1.m, + inference/dynamic/@hmm_inf_engine/private/mk_hmm_obs_lik_vec.m: + Initial import of code base from Kevin Murphy. + +2003-01-23 10:49 yozhik + + * BNT/: general/convert_dbn_CPDs_to_tables1.m, + inference/dynamic/@hmm_inf_engine/private/mk_hmm_obs_lik_vec.m: + Initial revision + +2003-01-23 10:44 yozhik + + * BNT/general/convert_dbn_CPDs_to_tables.m: Initial import of code + base from Kevin Murphy. 
+ +2003-01-23 10:44 yozhik + + * BNT/general/convert_dbn_CPDs_to_tables.m: Initial revision + +2003-01-22 13:38 yozhik + + * BNT/inference/dynamic/@hmm_inf_engine/enter_evidence.m: Initial + import of code base from Kevin Murphy. + +2003-01-22 13:38 yozhik + + * BNT/inference/dynamic/@hmm_inf_engine/enter_evidence.m: Initial + revision + +2003-01-22 12:32 yozhik + + * HMM/mc_sample_endstate.m: Initial import of code base from Kevin + Murphy. + +2003-01-22 12:32 yozhik + + * HMM/mc_sample_endstate.m: Initial revision + +2003-01-22 09:56 yozhik + + * HMM/fixed_lag_smoother.m: Initial import of code base from Kevin + Murphy. + +2003-01-22 09:56 yozhik + + * HMM/fixed_lag_smoother.m: Initial revision + +2003-01-20 08:56 yozhik + + * GraphViz/draw_graph_test.m: Initial import of code base from + Kevin Murphy. + +2003-01-20 08:56 yozhik + + * GraphViz/draw_graph_test.m: Initial revision + +2003-01-18 15:10 yozhik + + * BNT/general/dsep_test.m: Initial import of code base from Kevin + Murphy. + +2003-01-18 15:10 yozhik + + * BNT/general/dsep_test.m: Initial revision + +2003-01-18 15:00 yozhik + + * BNT/copyright.txt: Initial import of code base from Kevin Murphy. + +2003-01-18 15:00 yozhik + + * BNT/copyright.txt: Initial revision + +2003-01-18 14:49 yozhik + + * Kalman/tracking_demo.m: Initial import of code base from Kevin + Murphy. + +2003-01-18 14:49 yozhik + + * Kalman/tracking_demo.m: Initial revision + +2003-01-18 14:22 yozhik + + * BNT/: examples/dummy, inference/dummy, inference/dynamic/dummy, + inference/online/dummy, inference/static/dummy: Initial import of + code base from Kevin Murphy. + +2003-01-18 14:22 yozhik + + * BNT/: examples/dummy, inference/dummy, inference/dynamic/dummy, + inference/online/dummy, inference/static/dummy: Initial revision + +2003-01-18 14:16 yozhik + + * BNT/examples/dynamic/: ehmm1.m, jtree_clq_test.m: Initial import + of code base from Kevin Murphy. + +2003-01-18 14:16 yozhik + + * BNT/examples/dynamic/: ehmm1.m, jtree_clq_test.m: Initial + revision + +2003-01-18 14:11 yozhik + + * BNT/inference/static/: + @jtree_sparse_inf_engine/jtree_sparse_inf_engine.m, + @jtree_mnet_inf_engine/jtree_mnet_inf_engine.m: Initial import of + code base from Kevin Murphy. + +2003-01-18 14:11 yozhik + + * BNT/inference/static/: + @jtree_sparse_inf_engine/jtree_sparse_inf_engine.m, + @jtree_mnet_inf_engine/jtree_mnet_inf_engine.m: Initial revision + +2003-01-18 13:17 yozhik + + * GraphViz/draw_dbn_test.m: Initial import of code base from Kevin + Murphy. + +2003-01-18 13:17 yozhik + + * GraphViz/draw_dbn_test.m: Initial revision + +2003-01-11 10:53 yozhik + + * BNT/inference/static/@pearl_inf_engine/pearl_inf_engine.m: + Initial import of code base from Kevin Murphy. + +2003-01-11 10:53 yozhik + + * BNT/inference/static/@pearl_inf_engine/pearl_inf_engine.m: + Initial revision + +2003-01-11 10:48 yozhik + + * BNT/examples/dynamic/HHMM/Map/learn_map.m: Initial import of code + base from Kevin Murphy. + +2003-01-11 10:48 yozhik + + * BNT/examples/dynamic/HHMM/Map/learn_map.m: Initial revision + +2003-01-11 10:41 yozhik + + * BNT/inference/dynamic/@jtree_dbn_inf_engine/enter_evidence.m: + Initial import of code base from Kevin Murphy. + +2003-01-11 10:41 yozhik + + * BNT/inference/dynamic/@jtree_dbn_inf_engine/enter_evidence.m: + Initial revision + +2003-01-11 10:13 yozhik + + * BNT/inference/dynamic/@bk_inf_engine/enter_soft_evidence.m: + Initial import of code base from Kevin Murphy. 
+ +2003-01-11 10:13 yozhik + + * BNT/inference/dynamic/@bk_inf_engine/enter_soft_evidence.m: + Initial revision + +2003-01-07 08:25 yozhik + + * BNT/CPDs/@softmax_CPD/softmax_CPD.m: Initial import of code base + from Kevin Murphy. + +2003-01-07 08:25 yozhik + + * BNT/CPDs/@softmax_CPD/softmax_CPD.m: Initial revision + +2003-01-03 14:01 yozhik + + * + BNT/inference/static/@belprop_mrf2_inf_engine/belprop_mrf2_inf_engine.m: + Initial import of code base from Kevin Murphy. + +2003-01-03 14:01 yozhik + + * + BNT/inference/static/@belprop_mrf2_inf_engine/belprop_mrf2_inf_engine.m: + Initial revision + +2003-01-02 09:49 yozhik + + * BNT/inference/static/@belprop_mrf2_inf_engine/find_mpe.m: Initial + import of code base from Kevin Murphy. + +2003-01-02 09:49 yozhik + + * BNT/inference/static/@belprop_mrf2_inf_engine/find_mpe.m: Initial + revision + +2003-01-02 09:28 yozhik + + * BNT/inference/static/@belprop_mrf2_inf_engine/: set_params.m, + enter_soft_evidence.m: Initial import of code base from Kevin + Murphy. + +2003-01-02 09:28 yozhik + + * BNT/inference/static/@belprop_mrf2_inf_engine/: set_params.m, + enter_soft_evidence.m: Initial revision + +2002-12-31 14:06 yozhik + + * BNT/general/mk_mrf2.m: Initial import of code base from Kevin + Murphy. + +2002-12-31 14:06 yozhik + + * BNT/general/mk_mrf2.m: Initial revision + +2002-12-31 13:24 yozhik + + * BNT/inference/static/@belprop_mrf2_inf_engine/marginal_nodes.m: + Initial import of code base from Kevin Murphy. + +2002-12-31 13:24 yozhik + + * BNT/inference/static/@belprop_mrf2_inf_engine/marginal_nodes.m: + Initial revision + +2002-12-31 11:00 yozhik + + * BNT/inference/static/@belprop_inf_engine/belprop_inf_engine.m: + Initial import of code base from Kevin Murphy. + +2002-12-31 11:00 yozhik + + * BNT/inference/static/@belprop_inf_engine/belprop_inf_engine.m: + Initial revision + +2002-12-16 11:16 yozhik + + * BNT/examples/dynamic/HHMM/remove_hhmm_end_state.m: Initial import + of code base from Kevin Murphy. + +2002-12-16 11:16 yozhik + + * BNT/examples/dynamic/HHMM/remove_hhmm_end_state.m: Initial + revision + +2002-12-16 09:57 yozhik + + * BNT/general/unroll_set.m: Initial import of code base from Kevin + Murphy. + +2002-12-16 09:57 yozhik + + * BNT/general/unroll_set.m: Initial revision + +2002-11-26 14:14 yozhik + + * BNT/examples/dynamic/HHMM/Mgram/mgram3.m: Initial import of code + base from Kevin Murphy. + +2002-11-26 14:14 yozhik + + * BNT/examples/dynamic/HHMM/Mgram/mgram3.m: Initial revision + +2002-11-26 14:04 yozhik + + * BNT/examples/dynamic/HHMM/Mgram/mgram2.m: Initial import of code + base from Kevin Murphy. + +2002-11-26 14:04 yozhik + + * BNT/examples/dynamic/HHMM/Mgram/mgram2.m: Initial revision + +2002-11-22 16:44 yozhik + + * BNT/examples/dynamic/HHMM/Mgram/Old/mgram2.m: Initial import of + code base from Kevin Murphy. + +2002-11-22 16:44 yozhik + + * BNT/examples/dynamic/HHMM/Mgram/Old/mgram2.m: Initial revision + +2002-11-22 15:59 yozhik + + * BNT/examples/dynamic/HHMM/Mgram/mgram1.m: Initial import of code + base from Kevin Murphy. + +2002-11-22 15:59 yozhik + + * BNT/examples/dynamic/HHMM/Mgram/mgram1.m: Initial revision + +2002-11-22 15:51 yozhik + + * BNT/inference/dynamic/@jtree_dbn_inf_engine/marginal_nodes.m: + Initial import of code base from Kevin Murphy. + +2002-11-22 15:51 yozhik + + * BNT/inference/dynamic/@jtree_dbn_inf_engine/marginal_nodes.m: + Initial revision + +2002-11-22 15:07 yozhik + + * BNT/examples/dynamic/HHMM/Mgram/: num2letter.m, letter2num.m: + Initial import of code base from Kevin Murphy. 
+ +2002-11-22 15:07 yozhik + + * BNT/examples/dynamic/HHMM/Mgram/: num2letter.m, letter2num.m: + Initial revision + +2002-11-22 14:35 yozhik + + * BNT/general/convert_dbn_CPDs_to_pots.m: Initial import of code + base from Kevin Murphy. + +2002-11-22 14:35 yozhik + + * BNT/general/convert_dbn_CPDs_to_pots.m: Initial revision + +2002-11-22 13:45 yozhik + + * HMM/mk_rightleft_transmat.m: Initial import of code base from + Kevin Murphy. + +2002-11-22 13:45 yozhik + + * HMM/mk_rightleft_transmat.m: Initial revision + +2002-11-14 12:33 yozhik + + * BNT/examples/dynamic/water2.m: Initial import of code base from + Kevin Murphy. + +2002-11-14 12:33 yozhik + + * BNT/examples/dynamic/water2.m: Initial revision + +2002-11-14 12:07 yozhik + + * BNT/examples/dynamic/water1.m: Initial import of code base from + Kevin Murphy. + +2002-11-14 12:07 yozhik + + * BNT/examples/dynamic/water1.m: Initial revision + +2002-11-14 12:02 yozhik + + * BNT/inference/: online/@hmm_2TBN_inf_engine/marginal_nodes.m, + dynamic/@hmm_inf_engine/marginal_nodes.m, + online/@hmm_2TBN_inf_engine/hmm_2TBN_inf_engine.m, + dynamic/@hmm_inf_engine/hmm_inf_engine.m, + dynamic/@hmm_inf_engine/marginal_family.m, + online/@hmm_2TBN_inf_engine/marginal_family.m: Initial import of + code base from Kevin Murphy. + +2002-11-14 12:02 yozhik + + * BNT/inference/: online/@hmm_2TBN_inf_engine/marginal_nodes.m, + dynamic/@hmm_inf_engine/marginal_nodes.m, + online/@hmm_2TBN_inf_engine/hmm_2TBN_inf_engine.m, + dynamic/@hmm_inf_engine/hmm_inf_engine.m, + dynamic/@hmm_inf_engine/marginal_family.m, + online/@hmm_2TBN_inf_engine/marginal_family.m: Initial revision + +2002-11-14 08:31 yozhik + + * BNT/inference/: + online/@jtree_2TBN_inf_engine/jtree_2TBN_inf_engine.m, + dynamic/@jtree_dbn_inf_engine/jtree_dbn_inf_engine.m: Initial + import of code base from Kevin Murphy. + +2002-11-14 08:31 yozhik + + * BNT/inference/: + online/@jtree_2TBN_inf_engine/jtree_2TBN_inf_engine.m, + dynamic/@jtree_dbn_inf_engine/jtree_dbn_inf_engine.m: Initial + revision + +2002-11-13 17:01 yozhik + + * BNT/examples/: static/qmr2.m, dynamic/arhmm1.m: Initial import of + code base from Kevin Murphy. + +2002-11-13 17:01 yozhik + + * BNT/examples/: static/qmr2.m, dynamic/arhmm1.m: Initial revision + +2002-11-03 08:44 yozhik + + * BNT/examples/static/Models/mk_alarm_bnet.m: Initial import of + code base from Kevin Murphy. + +2002-11-03 08:44 yozhik + + * BNT/examples/static/Models/mk_alarm_bnet.m: Initial revision + +2002-11-01 16:32 yozhik + + * Kalman/kalman_forward_backward.m: Initial import of code base + from Kevin Murphy. + +2002-11-01 16:32 yozhik + + * Kalman/kalman_forward_backward.m: Initial revision + +2002-10-23 08:17 yozhik + + * Kalman/learning_demo.m: Initial import of code base from Kevin + Murphy. + +2002-10-23 08:17 yozhik + + * Kalman/learning_demo.m: Initial revision + +2002-10-18 13:05 yozhik + + * BNT/inference/static/@pearl_inf_engine/marginal_family.m: Initial + import of code base from Kevin Murphy. + +2002-10-18 13:05 yozhik + + * BNT/inference/static/@pearl_inf_engine/marginal_family.m: Initial + revision + +2002-10-10 16:45 yozhik + + * BNT/examples/dynamic/jtree_clq_test2.m: Initial import of code + base from Kevin Murphy. + +2002-10-10 16:45 yozhik + + * BNT/examples/dynamic/jtree_clq_test2.m: Initial revision + +2002-10-10 16:14 yozhik + + * BNT/examples/dynamic/: mk_mildew_dbn.m, mk_uffe_dbn.m: Initial + import of code base from Kevin Murphy. 
+ +2002-10-10 16:14 yozhik + + * BNT/examples/dynamic/: mk_mildew_dbn.m, mk_uffe_dbn.m: Initial + revision + +2002-10-09 13:36 yozhik + + * BNT/examples/dynamic/mk_ps_from_clqs.m: Initial import of code + base from Kevin Murphy. + +2002-10-09 13:36 yozhik + + * BNT/examples/dynamic/mk_ps_from_clqs.m: Initial revision + +2002-10-07 06:26 yozhik + + * BNT/CPDs/@deterministic_CPD/deterministic_CPD.m: Initial import + of code base from Kevin Murphy. + +2002-10-07 06:26 yozhik + + * BNT/CPDs/@deterministic_CPD/deterministic_CPD.m: Initial revision + +2002-10-02 08:39 yozhik + + * BNT/potentials/Tables/marg_tableC.c: Initial import of code base + from Kevin Murphy. + +2002-10-02 08:39 yozhik + + * BNT/potentials/Tables/marg_tableC.c: Initial revision + +2002-10-02 08:28 yozhik + + * BNT/potentials/Tables/: mult_by_tableM.m, mult_by_table2.m: + Initial import of code base from Kevin Murphy. + +2002-10-02 08:28 yozhik + + * BNT/potentials/Tables/: mult_by_tableM.m, mult_by_table2.m: + Initial revision + +2002-10-01 14:33 yozhik + + * BNT/potentials/Tables/mult_by_tableC.c: Initial import of code + base from Kevin Murphy. + +2002-10-01 14:33 yozhik + + * BNT/potentials/Tables/mult_by_tableC.c: Initial revision + +2002-10-01 14:23 yozhik + + * BNT/potentials/Tables/mult_by_table.c: Initial import of code + base from Kevin Murphy. + +2002-10-01 14:23 yozhik + + * BNT/potentials/Tables/mult_by_table.c: Initial revision + +2002-10-01 14:20 yozhik + + * BNT/potentials/Tables/repmat_and_mult.c: Initial import of code + base from Kevin Murphy. + +2002-10-01 14:20 yozhik + + * BNT/potentials/Tables/repmat_and_mult.c: Initial revision + +2002-10-01 12:04 yozhik + + * BNT/potentials/@dpot/dpot.m: Initial import of code base from + Kevin Murphy. + +2002-10-01 12:04 yozhik + + * BNT/potentials/@dpot/dpot.m: Initial revision + +2002-10-01 11:21 yozhik + + * BNT/examples/static/Belprop/belprop_polytree_discrete.m: Initial + import of code base from Kevin Murphy. + +2002-10-01 11:21 yozhik + + * BNT/examples/static/Belprop/belprop_polytree_discrete.m: Initial + revision + +2002-10-01 11:16 yozhik + + * BNT/examples/static/cmp_inference_static.m: Initial import of + code base from Kevin Murphy. + +2002-10-01 11:16 yozhik + + * BNT/examples/static/cmp_inference_static.m: Initial revision + +2002-10-01 10:39 yozhik + + * BNT/potentials/Tables/marg_tableM.m: Initial import of code base + from Kevin Murphy. + +2002-10-01 10:39 yozhik + + * BNT/potentials/Tables/marg_tableM.m: Initial revision + +2002-09-29 03:21 yozhik + + * BNT/potentials/Tables/mult_by_table_global.m: Initial import of + code base from Kevin Murphy. + +2002-09-29 03:21 yozhik + + * BNT/potentials/Tables/mult_by_table_global.m: Initial revision + +2002-09-26 01:39 yozhik + + * BNT/learning/learn_struct_K2.m: Initial import of code base from + Kevin Murphy. + +2002-09-26 01:39 yozhik + + * BNT/learning/learn_struct_K2.m: Initial revision + +2002-09-24 15:43 yozhik + + * BNT/: CPDs/@hhmm2Q_CPD/update_ess.m, + CPDs/@hhmm2Q_CPD/maximize_params.m, + examples/dynamic/HHMM/Map/disp_map_hhmm.m: Initial import of code + base from Kevin Murphy. + +2002-09-24 15:43 yozhik + + * BNT/: CPDs/@hhmm2Q_CPD/update_ess.m, + CPDs/@hhmm2Q_CPD/maximize_params.m, + examples/dynamic/HHMM/Map/disp_map_hhmm.m: Initial revision + +2002-09-24 15:34 yozhik + + * BNT/CPDs/@hhmm2Q_CPD/: hhmm2Q_CPD.m, reset_ess.m: Initial import + of code base from Kevin Murphy. 
+ +2002-09-24 15:34 yozhik + + * BNT/CPDs/@hhmm2Q_CPD/: hhmm2Q_CPD.m, reset_ess.m: Initial + revision + +2002-09-24 15:13 yozhik + + * BNT/examples/dynamic/HHMM/Map/mk_rnd_map_hhmm.m: Initial import + of code base from Kevin Murphy. + +2002-09-24 15:13 yozhik + + * BNT/examples/dynamic/HHMM/Map/mk_rnd_map_hhmm.m: Initial revision + +2002-09-24 06:10 yozhik + + * BNT/CPDs/@hhmmQ_CPD/maximize_params.m: Initial import of code + base from Kevin Murphy. + +2002-09-24 06:10 yozhik + + * BNT/CPDs/@hhmmQ_CPD/maximize_params.m: Initial revision + +2002-09-24 06:02 yozhik + + * BNT/examples/dynamic/HHMM/Map/sample_from_map.m: Initial import + of code base from Kevin Murphy. + +2002-09-24 06:02 yozhik + + * BNT/examples/dynamic/HHMM/Map/sample_from_map.m: Initial revision + +2002-09-24 05:46 yozhik + + * BNT/CPDs/@hhmm2Q_CPD/CPD_to_CPT.m: Initial import of code base + from Kevin Murphy. + +2002-09-24 05:46 yozhik + + * BNT/CPDs/@hhmm2Q_CPD/CPD_to_CPT.m: Initial revision + +2002-09-24 03:49 yozhik + + * BNT/examples/dynamic/HHMM/Map/mk_map_hhmm.m: Initial import of + code base from Kevin Murphy. + +2002-09-24 03:49 yozhik + + * BNT/examples/dynamic/HHMM/Map/mk_map_hhmm.m: Initial revision + +2002-09-24 00:02 yozhik + + * BNT/examples/dynamic/HHMM/Map/Old/mk_map_hhmm.m: Initial import + of code base from Kevin Murphy. + +2002-09-24 00:02 yozhik + + * BNT/examples/dynamic/HHMM/Map/Old/mk_map_hhmm.m: Initial revision + +2002-09-23 21:19 yozhik + + * BNT/CPDs/@hhmmQ_CPD/hhmmQ_CPD.m: Initial import of code base from + Kevin Murphy. + +2002-09-23 21:19 yozhik + + * BNT/CPDs/@hhmmQ_CPD/hhmmQ_CPD.m: Initial revision + +2002-09-23 19:58 yozhik + + * BNT/CPDs/@hhmmQ_CPD/update_CPT.m: Initial import of code base + from Kevin Murphy. + +2002-09-23 19:58 yozhik + + * BNT/CPDs/@hhmmQ_CPD/update_CPT.m: Initial revision + +2002-09-23 19:30 yozhik + + * BNT/CPDs/@hhmmQ_CPD/Old/update_CPT.m: Initial import of code base + from Kevin Murphy. + +2002-09-23 19:30 yozhik + + * BNT/CPDs/@hhmmQ_CPD/Old/update_CPT.m: Initial revision + +2002-09-21 14:37 yozhik + + * BNT/examples/dynamic/HHMM/abcd_hhmm.m: Initial import of code + base from Kevin Murphy. + +2002-09-21 14:37 yozhik + + * BNT/examples/dynamic/HHMM/abcd_hhmm.m: Initial revision + +2002-09-21 13:58 yozhik + + * BNT/examples/dynamic/HHMM/mk_hhmm.m: Initial import of code base + from Kevin Murphy. + +2002-09-21 13:58 yozhik + + * BNT/examples/dynamic/HHMM/mk_hhmm.m: Initial revision + +2002-09-10 10:44 yozhik + + * BNT/CPDs/@gaussian_CPD/log_prob_node.m: Initial import of code + base from Kevin Murphy. + +2002-09-10 10:44 yozhik + + * BNT/CPDs/@gaussian_CPD/log_prob_node.m: Initial revision + +2002-07-28 16:09 yozhik + + * BNT/learning/: learn_struct_pdag_pc_constrain.m, CovMat.m: + Initial import of code base from Kevin Murphy. + +2002-07-28 16:09 yozhik + + * BNT/learning/: learn_struct_pdag_pc_constrain.m, CovMat.m: + Initial revision + +2002-07-24 07:48 yozhik + + * BNT/general/hodbn_to_bnet.m: Initial import of code base from + Kevin Murphy. + +2002-07-24 07:48 yozhik + + * BNT/general/hodbn_to_bnet.m: Initial revision + +2002-07-23 06:17 yozhik + + * BNT/general/mk_higher_order_dbn.m: Initial import of code base + from Kevin Murphy. + +2002-07-23 06:17 yozhik + + * BNT/general/mk_higher_order_dbn.m: Initial revision + +2002-07-20 18:25 yozhik + + * BNT/inference/online/@jtree_2TBN_inf_engine/set_fields.m: Initial + import of code base from Kevin Murphy. 
+ +2002-07-20 18:25 yozhik + + * BNT/inference/online/@jtree_2TBN_inf_engine/set_fields.m: Initial + revision + +2002-07-20 17:32 yozhik + + * BNT/inference/online/@jtree_2TBN_inf_engine/back_mpe.m: Initial + import of code base from Kevin Murphy. + +2002-07-20 17:32 yozhik + + * BNT/inference/online/@jtree_2TBN_inf_engine/back_mpe.m: Initial + revision + +2002-07-02 15:56 yozhik + + * BNT/examples/dynamic/HHMM/Motif/learn_motif_hhmm.m: Initial + import of code base from Kevin Murphy. + +2002-07-02 15:56 yozhik + + * BNT/examples/dynamic/HHMM/Motif/learn_motif_hhmm.m: Initial + revision + +2002-06-27 13:34 yozhik + + * BNT/general/add_ev_to_dmarginal.m: Initial import of code base + from Kevin Murphy. + +2002-06-27 13:34 yozhik + + * BNT/general/add_ev_to_dmarginal.m: Initial revision + +2002-06-24 16:54 yozhik + + * BNT/CPDs/@hhmmF_CPD/update_ess.m: Initial import of code base + from Kevin Murphy. + +2002-06-24 16:54 yozhik + + * BNT/CPDs/@hhmmF_CPD/update_ess.m: Initial revision + +2002-06-24 16:38 yozhik + + * BNT/CPDs/@hhmmF_CPD/hhmmF_CPD.m: Initial import of code base from + Kevin Murphy. + +2002-06-24 16:38 yozhik + + * BNT/CPDs/@hhmmF_CPD/hhmmF_CPD.m: Initial revision + +2002-06-24 15:45 yozhik + + * BNT/CPDs/@hhmmF_CPD/update_CPT.m: Initial import of code base + from Kevin Murphy. + +2002-06-24 15:45 yozhik + + * BNT/CPDs/@hhmmF_CPD/update_CPT.m: Initial revision + +2002-06-24 15:35 yozhik + + * BNT/CPDs/@hhmmF_CPD/Old/: hhmmF_CPD.m, log_prior.m, + maximize_params.m, reset_ess.m, update_CPT.m, update_ess.m: + Initial import of code base from Kevin Murphy. + +2002-06-24 15:35 yozhik + + * BNT/CPDs/@hhmmF_CPD/Old/: hhmmF_CPD.m, log_prior.m, + maximize_params.m, reset_ess.m, update_CPT.m, update_ess.m: + Initial revision + +2002-06-24 15:23 yozhik + + * BNT/CPDs/@hhmmQ_CPD/Old/update_ess4.m: Initial import of code + base from Kevin Murphy. + +2002-06-24 15:23 yozhik + + * BNT/CPDs/@hhmmQ_CPD/Old/update_ess4.m: Initial revision + +2002-06-24 15:08 yozhik + + * BNT/CPDs/@hhmmQ_CPD/Old/update_ess3.m: Initial import of code + base from Kevin Murphy. + +2002-06-24 15:08 yozhik + + * BNT/CPDs/@hhmmQ_CPD/Old/update_ess3.m: Initial revision + +2002-06-24 14:20 yozhik + + * BNT/CPDs/@hhmmQ_CPD/Old/update_ess2.m: Initial import of code + base from Kevin Murphy. + +2002-06-24 14:20 yozhik + + * BNT/CPDs/@hhmmQ_CPD/Old/update_ess2.m: Initial revision + +2002-06-24 11:56 yozhik + + * BNT/: general/mk_fgraph_given_ev.m, + CPDs/mk_isolated_tabular_CPD.m: Initial import of code base from + Kevin Murphy. + +2002-06-24 11:56 yozhik + + * BNT/: general/mk_fgraph_given_ev.m, + CPDs/mk_isolated_tabular_CPD.m: Initial revision + +2002-06-24 11:19 yozhik + + * BNT/CPDs/@hhmmQ_CPD/Old/: hhmmQ_CPD.m, log_prior.m, + maximize_params.m, reset_ess.m, update_ess.m: Initial import of + code base from Kevin Murphy. + +2002-06-24 11:19 yozhik + + * BNT/CPDs/@hhmmQ_CPD/Old/: hhmmQ_CPD.m, log_prior.m, + maximize_params.m, reset_ess.m, update_ess.m: Initial revision + +2002-06-20 13:30 yozhik + + * BNT/examples/dynamic/mildew1.m: Initial import of code base from + Kevin Murphy. + +2002-06-20 13:30 yozhik + + * BNT/examples/dynamic/mildew1.m: Initial revision + +2002-06-19 17:18 yozhik + + * BNT/: inference/dynamic/@hmm_inf_engine/find_mpe.m, + examples/dynamic/HHMM/Square/learn_square_hhmm_cts.m: Initial + import of code base from Kevin Murphy. 
+ +2002-06-19 17:18 yozhik + + * BNT/: inference/dynamic/@hmm_inf_engine/find_mpe.m, + examples/dynamic/HHMM/Square/learn_square_hhmm_cts.m: Initial + revision + +2002-06-19 17:03 yozhik + + * BNT/examples/static/fgraph/fg1.m: Initial import of code base + from Kevin Murphy. + +2002-06-19 17:03 yozhik + + * BNT/examples/static/fgraph/fg1.m: Initial revision + +2002-06-19 16:59 yozhik + + * BNT/: examples/static/softev1.m, + inference/static/@belprop_fg_inf_engine/find_mpe.m: Initial + import of code base from Kevin Murphy. + +2002-06-19 16:59 yozhik + + * BNT/: examples/static/softev1.m, + inference/static/@belprop_fg_inf_engine/find_mpe.m: Initial + revision + +2002-06-19 15:11 yozhik + + * BNT/inference/static/@var_elim_inf_engine/find_mpe.m: Initial + import of code base from Kevin Murphy. + +2002-06-19 15:11 yozhik + + * BNT/inference/static/@var_elim_inf_engine/find_mpe.m: Initial + revision + +2002-06-19 15:08 yozhik + + * BNT/: inference/static/@belprop_inf_engine/find_mpe.m, + examples/static/mpe1.m, examples/static/mpe2.m: Initial import of + code base from Kevin Murphy. + +2002-06-19 15:08 yozhik + + * BNT/: inference/static/@belprop_inf_engine/find_mpe.m, + examples/static/mpe1.m, examples/static/mpe2.m: Initial revision + +2002-06-19 15:04 yozhik + + * BNT/inference/static/@var_elim_inf_engine/: + var_elim_inf_engine.m, enter_evidence.m: Initial import of code + base from Kevin Murphy. + +2002-06-19 15:04 yozhik + + * BNT/inference/static/@var_elim_inf_engine/: + var_elim_inf_engine.m, enter_evidence.m: Initial revision + +2002-06-19 14:56 yozhik + + * BNT/inference/static/@global_joint_inf_engine/find_mpe.m: Initial + import of code base from Kevin Murphy. + +2002-06-19 14:56 yozhik + + * BNT/inference/static/@global_joint_inf_engine/find_mpe.m: Initial + revision + +2002-06-17 16:49 yozhik + + * BNT/inference/online/: @jtree_2TBN_inf_engine/back1_mpe.m, + @smoother_engine/find_mpe.m: Initial import of code base from + Kevin Murphy. + +2002-06-17 16:49 yozhik + + * BNT/inference/online/: @jtree_2TBN_inf_engine/back1_mpe.m, + @smoother_engine/find_mpe.m: Initial revision + +2002-06-17 16:46 yozhik + + * BNT/inference/online/: @jtree_2TBN_inf_engine/fwd.m, + @jtree_2TBN_inf_engine/fwd1.m, @smoother_engine/enter_evidence.m: + Initial import of code base from Kevin Murphy. + +2002-06-17 16:46 yozhik + + * BNT/inference/online/: @jtree_2TBN_inf_engine/fwd.m, + @jtree_2TBN_inf_engine/fwd1.m, @smoother_engine/enter_evidence.m: + Initial revision + +2002-06-17 16:38 yozhik + + * BNT/inference/online/@jtree_2TBN_inf_engine/backT_mpe.m: Initial + import of code base from Kevin Murphy. + +2002-06-17 16:38 yozhik + + * BNT/inference/online/@jtree_2TBN_inf_engine/backT_mpe.m: Initial + revision + +2002-06-17 16:34 yozhik + + * BNT/inference/online/@jtree_2TBN_inf_engine/: back.m, backT.m, + back1.m: Initial import of code base from Kevin Murphy. + +2002-06-17 16:34 yozhik + + * BNT/inference/online/@jtree_2TBN_inf_engine/: back.m, backT.m, + back1.m: Initial revision + +2002-06-17 16:14 yozhik + + * BNT/inference/static/@jtree_inf_engine/: find_mpe.m, + find_max_config.m: Initial import of code base from Kevin Murphy. + +2002-06-17 16:14 yozhik + + * BNT/inference/static/@jtree_inf_engine/: find_mpe.m, + find_max_config.m: Initial revision + +2002-06-17 14:58 yozhik + + * BNT/general/Old/calc_mpe.m: Initial import of code base from + Kevin Murphy. 
+ +2002-06-17 14:58 yozhik + + * BNT/general/Old/calc_mpe.m: Initial revision + +2002-06-17 13:59 yozhik + + * BNT/inference/static/@jtree_inf_engine/: enter_evidence.m, + distribute_evidence.m: Initial import of code base from Kevin + Murphy. + +2002-06-17 13:59 yozhik + + * BNT/inference/static/@jtree_inf_engine/: enter_evidence.m, + distribute_evidence.m: Initial revision + +2002-06-17 13:29 yozhik + + * BNT/inference/static/@jtree_mnet_inf_engine/: find_mpe.m, + enter_evidence.m: Initial import of code base from Kevin Murphy. + +2002-06-17 13:29 yozhik + + * BNT/inference/static/@jtree_mnet_inf_engine/: find_mpe.m, + enter_evidence.m: Initial revision + +2002-06-16 13:01 yozhik + + * BNT/general/is_mnet.m: Initial import of code base from Kevin + Murphy. + +2002-06-16 13:01 yozhik + + * BNT/general/is_mnet.m: Initial revision + +2002-06-16 12:52 yozhik + + * BNT/general/mk_mnet.m: Initial import of code base from Kevin + Murphy. + +2002-06-16 12:52 yozhik + + * BNT/general/mk_mnet.m: Initial revision + +2002-06-16 12:34 yozhik + + * BNT/inference/static/@jtree_inf_engine/init_pot.m: Initial import + of code base from Kevin Murphy. + +2002-06-16 12:34 yozhik + + * BNT/inference/static/@jtree_inf_engine/init_pot.m: Initial + revision + +2002-06-16 12:06 yozhik + + * BNT/potentials/@dpot/find_most_prob_entry.m: Initial import of + code base from Kevin Murphy. + +2002-06-16 12:06 yozhik + + * BNT/potentials/@dpot/find_most_prob_entry.m: Initial revision + +2002-05-31 03:25 yozhik + + * BNT/general/unroll_higher_order_topology.m: Initial import of + code base from Kevin Murphy. + +2002-05-31 03:25 yozhik + + * BNT/general/unroll_higher_order_topology.m: Initial revision + +2002-05-29 08:59 yozhik + + * BNT/@assocarray/assocarray.m, + BNT/CPDs/@boolean_CPD/boolean_CPD.m, + BNT/CPDs/@discrete_CPD/CPD_to_lambda_msg.m, + BNT/CPDs/@discrete_CPD/CPD_to_pi.m, + BNT/CPDs/@discrete_CPD/CPD_to_scgpot.m, + BNT/CPDs/@discrete_CPD/README, + BNT/CPDs/@discrete_CPD/convert_CPD_to_table_hidden_ps.m, + BNT/CPDs/@discrete_CPD/convert_obs_CPD_to_table.m, + BNT/CPDs/@discrete_CPD/convert_to_sparse_table.c, + BNT/CPDs/@discrete_CPD/convert_to_table.m, + BNT/CPDs/@discrete_CPD/discrete_CPD.m, + BNT/CPDs/@discrete_CPD/dom_sizes.m, + BNT/CPDs/@discrete_CPD/log_prob_node.m, + BNT/CPDs/@discrete_CPD/prob_node.m, + BNT/CPDs/@discrete_CPD/sample_node.m, + BNT/CPDs/@discrete_CPD/Old/convert_to_pot.m, + BNT/CPDs/@discrete_CPD/Old/convert_to_table.m, + BNT/CPDs/@discrete_CPD/Old/prob_CPD.m, + BNT/CPDs/@discrete_CPD/Old/prob_node.m, + BNT/CPDs/@discrete_CPD/private/prod_CPT_and_pi_msgs.m, + BNT/CPDs/@gaussian_CPD/CPD_to_lambda_msg.m, + BNT/CPDs/@gaussian_CPD/CPD_to_pi.m, + BNT/CPDs/@gaussian_CPD/CPD_to_scgpot.m, + BNT/CPDs/@gaussian_CPD/adjustable_CPD.m, + BNT/CPDs/@gaussian_CPD/convert_CPD_to_table_hidden_ps.m, + BNT/CPDs/@gaussian_CPD/display.m, + BNT/CPDs/@gaussian_CPD/get_field.m, + BNT/CPDs/@gaussian_CPD/reset_ess.m, + BNT/CPDs/@gaussian_CPD/sample_node.m, + BNT/CPDs/@gaussian_CPD/set_fields.m, + BNT/CPDs/@gaussian_CPD/Old/CPD_to_lambda_msg.m, + BNT/CPDs/@gaussian_CPD/Old/gaussian_CPD.m, + BNT/CPDs/@gaussian_CPD/Old/log_prob_node.m, + BNT/CPDs/@gaussian_CPD/Old/update_ess.m, + BNT/CPDs/@gaussian_CPD/Old/update_tied_ess.m, + BNT/CPDs/@gaussian_CPD/private/CPD_to_linear_gaussian.m, + BNT/CPDs/@generic_CPD/README, + BNT/CPDs/@generic_CPD/adjustable_CPD.m, + BNT/CPDs/@generic_CPD/display.m, + BNT/CPDs/@generic_CPD/generic_CPD.m, + BNT/CPDs/@generic_CPD/log_prior.m, + BNT/CPDs/@generic_CPD/set_clamped.m, + 
BNT/CPDs/@generic_CPD/Old/BIC_score_CPD.m, + BNT/CPDs/@generic_CPD/Old/CPD_to_dpots.m, + BNT/CPDs/@gmux_CPD/CPD_to_lambda_msg.m, + BNT/CPDs/@gmux_CPD/convert_to_pot.m, + BNT/CPDs/@gmux_CPD/CPD_to_pi.m, BNT/CPDs/@gmux_CPD/display.m, + BNT/CPDs/@gmux_CPD/gmux_CPD.m, BNT/CPDs/@gmux_CPD/sample_node.m, + BNT/CPDs/@gmux_CPD/Old/gmux_CPD.m, + BNT/CPDs/@hhmmF_CPD/log_prior.m, + BNT/CPDs/@hhmmF_CPD/maximize_params.m, + BNT/CPDs/@hhmmF_CPD/reset_ess.m, BNT/CPDs/@hhmmQ_CPD/log_prior.m, + BNT/CPDs/@hhmmQ_CPD/reset_ess.m, + BNT/CPDs/@mlp_CPD/convert_to_table.m, + BNT/CPDs/@mlp_CPD/maximize_params.m, BNT/CPDs/@mlp_CPD/mlp_CPD.m, + BNT/CPDs/@mlp_CPD/reset_ess.m, BNT/CPDs/@mlp_CPD/update_ess.m, + BNT/CPDs/@noisyor_CPD/CPD_to_lambda_msg.m, + BNT/CPDs/@noisyor_CPD/CPD_to_pi.m, + BNT/CPDs/@noisyor_CPD/noisyor_CPD.m, + BNT/CPDs/@noisyor_CPD/private/sum_prod_CPD_and_pi_msgs.m, + BNT/CPDs/@root_CPD/CPD_to_pi.m, + BNT/CPDs/@root_CPD/convert_to_pot.m, + BNT/CPDs/@root_CPD/log_marg_prob_node.m, + BNT/CPDs/@root_CPD/log_prob_node.m, + BNT/CPDs/@root_CPD/root_CPD.m, BNT/CPDs/@root_CPD/sample_node.m, + BNT/CPDs/@root_CPD/Old/CPD_to_CPT.m, + BNT/CPDs/@softmax_CPD/convert_to_pot.m, + BNT/CPDs/@softmax_CPD/display.m, + BNT/CPDs/@softmax_CPD/get_field.m, + BNT/CPDs/@softmax_CPD/maximize_params.m, + BNT/CPDs/@softmax_CPD/reset_ess.m, + BNT/CPDs/@softmax_CPD/sample_node.m, + BNT/CPDs/@softmax_CPD/set_fields.m, + BNT/CPDs/@softmax_CPD/update_ess.m, + BNT/CPDs/@softmax_CPD/private/extract_params.m, + BNT/CPDs/@tabular_CPD/CPD_to_CPT.m, + BNT/CPDs/@tabular_CPD/bayes_update_params.m, + BNT/CPDs/@tabular_CPD/log_nextcase_prob_node.m, + BNT/CPDs/@tabular_CPD/log_prior.m, + BNT/CPDs/@tabular_CPD/reset_ess.m, + BNT/CPDs/@tabular_CPD/update_ess.m, + BNT/CPDs/@tabular_CPD/update_ess_simple.m, + BNT/CPDs/@tabular_CPD/Old/BIC_score_CPD.m, + BNT/CPDs/@tabular_CPD/Old/bayesian_score_CPD.m, + BNT/CPDs/@tabular_CPD/Old/log_marg_prob_node_case.m, + BNT/CPDs/@tabular_CPD/Old/mult_CPD_and_pi_msgs.m, + BNT/CPDs/@tabular_CPD/Old/prob_CPT.m, + BNT/CPDs/@tabular_CPD/Old/prob_node.m, + BNT/CPDs/@tabular_CPD/Old/sample_node.m, + BNT/CPDs/@tabular_CPD/Old/sample_node_single_case.m, + BNT/CPDs/@tabular_CPD/Old/tabular_CPD.m, + BNT/CPDs/@tabular_CPD/Old/update_params.m, + BNT/CPDs/@tabular_decision_node/CPD_to_CPT.m, + BNT/CPDs/@tabular_decision_node/display.m, + BNT/CPDs/@tabular_decision_node/get_field.m, + BNT/CPDs/@tabular_decision_node/set_fields.m, + BNT/CPDs/@tabular_decision_node/tabular_decision_node.m, + BNT/CPDs/@tabular_decision_node/Old/tabular_decision_node.m, + BNT/CPDs/@tabular_kernel/convert_to_pot.m, + BNT/CPDs/@tabular_kernel/convert_to_table.m, + BNT/CPDs/@tabular_kernel/get_field.m, + BNT/CPDs/@tabular_kernel/set_fields.m, + BNT/CPDs/@tabular_kernel/tabular_kernel.m, + BNT/CPDs/@tabular_kernel/Old/tabular_kernel.m, + BNT/CPDs/@tabular_utility_node/convert_to_pot.m, + BNT/CPDs/@tabular_utility_node/display.m, + BNT/CPDs/@tabular_utility_node/tabular_utility_node.m, + BNT/CPDs/@tree_CPD/display.m, + BNT/CPDs/@tree_CPD/evaluate_tree_performance.m, + BNT/CPDs/@tree_CPD/get_field.m, + BNT/CPDs/@tree_CPD/learn_params.m, BNT/CPDs/@tree_CPD/readme.txt, + BNT/CPDs/@tree_CPD/set_fields.m, BNT/CPDs/@tree_CPD/tree_CPD.m, + BNT/CPDs/Old/@linear_gaussian_CPD/linear_gaussian_CPD.m, + BNT/CPDs/Old/@linear_gaussian_CPD/log_marg_prob_node.m, + BNT/CPDs/Old/@linear_gaussian_CPD/update_params_complete.m, + BNT/CPDs/Old/@root_gaussian_CPD/log_marg_prob_node.m, + BNT/CPDs/Old/@root_gaussian_CPD/root_gaussian_CPD.m, + 
BNT/CPDs/Old/@root_gaussian_CPD/update_params_complete.m, + BNT/CPDs/Old/@tabular_chance_node/CPD_to_upot.m, + BNT/CPDs/Old/@tabular_chance_node/tabular_chance_node.m, + BNT/examples/dynamic/bat1.m, BNT/examples/dynamic/bkff1.m, + BNT/examples/dynamic/chmm1.m, + BNT/examples/dynamic/cmp_inference_dbn.m, + BNT/examples/dynamic/cmp_learning_dbn.m, + BNT/examples/dynamic/cmp_online_inference.m, + BNT/examples/dynamic/fhmm_infer.m, + BNT/examples/dynamic/filter_test1.m, + BNT/examples/dynamic/kalman1.m, + BNT/examples/dynamic/kjaerulff1.m, + BNT/examples/dynamic/loopy_dbn1.m, + BNT/examples/dynamic/mk_collage_from_clqs.m, + BNT/examples/dynamic/mk_fhmm.m, BNT/examples/dynamic/reveal1.m, + BNT/examples/dynamic/scg_dbn.m, + BNT/examples/dynamic/skf_data_assoc_gmux.m, + BNT/examples/dynamic/HHMM/add_hhmm_end_state.m, + BNT/examples/dynamic/HHMM/hhmm_jtree_clqs.m, + BNT/examples/dynamic/HHMM/mk_hhmm_topo.m, + BNT/examples/dynamic/HHMM/mk_hhmm_topo_F1.m, + BNT/examples/dynamic/HHMM/pretty_print_hhmm_parse.m, + BNT/examples/dynamic/HHMM/Motif/fixed_args_mk_motif_hhmm.m, + BNT/examples/dynamic/HHMM/Motif/mk_motif_hhmm.m, + BNT/examples/dynamic/HHMM/Motif/sample_motif_hhmm.m, + BNT/examples/dynamic/HHMM/Old/mk_abcd_hhmm.m, + BNT/examples/dynamic/HHMM/Old/mk_arrow_alpha_hhmm3.m, + BNT/examples/dynamic/HHMM/Old/mk_hhmm2.m, + BNT/examples/dynamic/HHMM/Old/mk_hhmm3.m, + BNT/examples/dynamic/HHMM/Old/mk_hhmm3_args.m, + BNT/examples/dynamic/HHMM/Old/motif_hhmm.m, + BNT/examples/dynamic/HHMM/Old/remove_hhmm_end_state.m, + BNT/examples/dynamic/HHMM/Square/get_square_data.m, + BNT/examples/dynamic/HHMM/Square/hhmm_inference.m, + BNT/examples/dynamic/HHMM/Square/is_F2_true_D3.m, + BNT/examples/dynamic/HHMM/Square/learn_square_hhmm_discrete.m, + BNT/examples/dynamic/HHMM/Square/mk_square_hhmm.m, + BNT/examples/dynamic/HHMM/Square/plot_square_hhmm.m, + BNT/examples/dynamic/HHMM/Square/sample_square_hhmm_cts.m, + BNT/examples/dynamic/HHMM/Square/sample_square_hhmm_discrete.m, + BNT/examples/dynamic/HHMM/Square/square4.mat, + BNT/examples/dynamic/HHMM/Square/square4_cases.mat, + BNT/examples/dynamic/HHMM/Square/test_square_fig.m, + BNT/examples/dynamic/HHMM/Square/test_square_fig.mat, + BNT/examples/dynamic/HHMM/Square/Old/learn_square_hhmm.m, + BNT/examples/dynamic/HHMM/Square/Old/mk_square_hhmm.m, + BNT/examples/dynamic/HHMM/Square/Old/plot_square_hhmm.m, + BNT/examples/dynamic/HHMM/Square/Old/sample_square_hhmm.m, + BNT/examples/dynamic/Old/chmm1.m, + BNT/examples/dynamic/Old/cmp_inference.m, + BNT/examples/dynamic/Old/kalman1.m, + BNT/examples/dynamic/Old/old.water1.m, + BNT/examples/dynamic/Old/online1.m, + BNT/examples/dynamic/Old/online2.m, + BNT/examples/dynamic/Old/scg_dbn.m, + BNT/examples/dynamic/SLAM/mk_gmux_robot_dbn.m, + BNT/examples/dynamic/SLAM/mk_linear_slam.m, + BNT/examples/dynamic/SLAM/slam_kf.m, + BNT/examples/dynamic/SLAM/slam_offline_loopy.m, + BNT/examples/dynamic/SLAM/slam_partial_kf.m, + BNT/examples/dynamic/SLAM/slam_stationary_loopy.m, + BNT/examples/dynamic/SLAM/Old/offline_loopy_slam.m, + BNT/examples/dynamic/SLAM/Old/paskin1.m, + BNT/examples/dynamic/SLAM/Old/skf_data_assoc_gmux2.m, + BNT/examples/dynamic/SLAM/Old/slam_kf.m, + BNT/examples/limids/id1.m, BNT/examples/limids/pigs1.m, + BNT/examples/static/cg1.m, BNT/examples/static/cg2.m, + BNT/examples/static/discrete2.m, BNT/examples/static/discrete3.m, + BNT/examples/static/fa1.m, BNT/examples/static/gaussian1.m, + BNT/examples/static/gibbs_test1.m, BNT/examples/static/lw1.m, + BNT/examples/static/mfa1.m, 
BNT/examples/static/mixexp1.m, + BNT/examples/static/mixexp2.m, BNT/examples/static/mixexp3.m, + BNT/examples/static/mog1.m, BNT/examples/static/qmr1.m, + BNT/examples/static/sample1.m, BNT/examples/static/softmax1.m, + BNT/examples/static/Belprop/belprop_loop1_discrete.m, + BNT/examples/static/Belprop/belprop_loop1_gauss.m, + BNT/examples/static/Belprop/belprop_loopy_cg.m, + BNT/examples/static/Belprop/belprop_loopy_discrete.m, + BNT/examples/static/Belprop/belprop_loopy_gauss.m, + BNT/examples/static/Belprop/belprop_polytree_cg.m, + BNT/examples/static/Belprop/belprop_polytree_gauss.m, + BNT/examples/static/Belprop/bp1.m, + BNT/examples/static/Belprop/gmux1.m, + BNT/examples/static/Brutti/Belief_IOhmm.m, + BNT/examples/static/Brutti/Belief_hmdt.m, + BNT/examples/static/Brutti/Belief_hme.m, + BNT/examples/static/Brutti/Sigmoid_Belief.m, + BNT/examples/static/HME/HMEforMatlab.jpg, + BNT/examples/static/HME/README, BNT/examples/static/HME/fhme.m, + BNT/examples/static/HME/gen_data.m, + BNT/examples/static/HME/hme_class_plot.m, + BNT/examples/static/HME/hme_reg_plot.m, + BNT/examples/static/HME/hme_topobuilder.m, + BNT/examples/static/HME/test_data_class.mat, + BNT/examples/static/HME/test_data_class2.mat, + BNT/examples/static/HME/test_data_reg.mat, + BNT/examples/static/HME/train_data_class.mat, + BNT/examples/static/HME/train_data_reg.mat, + BNT/examples/static/Misc/mixexp_data.txt, + BNT/examples/static/Misc/mixexp_graddesc.m, + BNT/examples/static/Misc/mixexp_plot.m, + BNT/examples/static/Misc/sprinkler.bif, + BNT/examples/static/Models/mk_cancer_bnet.m, + BNT/examples/static/Models/mk_car_bnet.m, + BNT/examples/static/Models/mk_ideker_bnet.m, + BNT/examples/static/Models/mk_incinerator_bnet.m, + BNT/examples/static/Models/mk_markov_chain_bnet.m, + BNT/examples/static/Models/mk_minimal_qmr_bnet.m, + BNT/examples/static/Models/mk_qmr_bnet.m, + BNT/examples/static/Models/mk_vstruct_bnet.m, + BNT/examples/static/Models/Old/mk_hmm_bnet.m, + BNT/examples/static/SCG/scg1.m, BNT/examples/static/SCG/scg2.m, + BNT/examples/static/SCG/scg3.m, + BNT/examples/static/SCG/scg_3node.m, + BNT/examples/static/SCG/scg_unstable.m, + BNT/examples/static/StructLearn/bic1.m, + BNT/examples/static/StructLearn/cooper_yoo.m, + BNT/examples/static/StructLearn/k2demo1.m, + BNT/examples/static/StructLearn/mcmc1.m, + BNT/examples/static/StructLearn/pc1.m, + BNT/examples/static/StructLearn/pc2.m, + BNT/examples/static/Zoubin/README, + BNT/examples/static/Zoubin/csum.m, + BNT/examples/static/Zoubin/ffa.m, + BNT/examples/static/Zoubin/mfa.m, + BNT/examples/static/Zoubin/mfa_cl.m, + BNT/examples/static/Zoubin/mfademo.m, + BNT/examples/static/Zoubin/rdiv.m, + BNT/examples/static/Zoubin/rprod.m, + BNT/examples/static/Zoubin/rsum.m, + BNT/examples/static/dtree/test_housing.m, + BNT/examples/static/dtree/test_restaurants.m, + BNT/examples/static/dtree/test_zoo1.m, + BNT/examples/static/dtree/tmp.dot, + BNT/examples/static/dtree/transform_data_into_bnt_format.m, + BNT/examples/static/fgraph/fg2.m, + BNT/examples/static/fgraph/fg3.m, + BNT/examples/static/fgraph/fg_mrf1.m, + BNT/examples/static/fgraph/fg_mrf2.m, + BNT/general/bnet_to_fgraph.m, + BNT/general/compute_fwd_interface.m, + BNT/general/compute_interface_nodes.m, + BNT/general/compute_minimal_interface.m, + BNT/general/dbn_to_bnet.m, + BNT/general/determine_elim_constraints.m, + BNT/general/do_intervention.m, BNT/general/dsep.m, + BNT/general/enumerate_scenarios.m, BNT/general/fgraph_to_bnet.m, + BNT/general/log_lik_complete.m, + BNT/general/log_marg_lik_complete.m, 
BNT/general/mk_bnet.m, + BNT/general/mk_fgraph.m, BNT/general/mk_limid.m, + BNT/general/mk_mutilated_samples.m, + BNT/general/mk_slice_and_half_dbn.m, + BNT/general/partition_dbn_nodes.m, + BNT/general/sample_bnet_nocell.m, BNT/general/sample_dbn.m, + BNT/general/score_bnet_complete.m, + BNT/general/unroll_dbn_topology.m, + BNT/general/Old/bnet_to_gdl_graph.m, + BNT/general/Old/calc_mpe_bucket.m, + BNT/general/Old/calc_mpe_dbn.m, + BNT/general/Old/calc_mpe_given_inf_engine.m, + BNT/general/Old/calc_mpe_global.m, + BNT/general/Old/compute_interface_nodes.m, + BNT/general/Old/mk_gdl_graph.m, GraphViz/draw_dbn.m, + GraphViz/make_layout.m, BNT/license.gpl.txt, + BNT/general/add_evidence_to_gmarginal.m, + BNT/inference/@inf_engine/bnet_from_engine.m, + BNT/inference/@inf_engine/get_field.m, + BNT/inference/@inf_engine/inf_engine.m, + BNT/inference/@inf_engine/marginal_family.m, + BNT/inference/@inf_engine/set_fields.m, + BNT/inference/@inf_engine/update_engine.m, + BNT/inference/@inf_engine/Old/marginal_family_pot.m, + BNT/inference/@inf_engine/Old/observed_nodes.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/bk_ff_hmm_inf_engine.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_init_bel.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_marginal_from_bel.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_predict_bel.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_update_bel.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_update_bel1.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/enter_evidence.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/marginal_family.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/marginal_nodes.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/bk_ff_fb.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/combine_marginals_into_joint.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/dbn_to_hmm.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/mk_hmm_obs_lik_mat.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/mk_hmm_obs_lik_vec.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/mk_hmm_obs_lik_vec1.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/project_joint_onto_marginals.m, + BNT/inference/dynamic/@bk_inf_engine/bk_inf_engine.m, + BNT/inference/dynamic/@bk_inf_engine/dbn_init_bel.m, + BNT/inference/dynamic/@bk_inf_engine/dbn_marginal_from_bel.m, + BNT/inference/dynamic/@bk_inf_engine/dbn_update_bel.m, + BNT/inference/dynamic/@bk_inf_engine/dbn_update_bel1.m, + BNT/inference/dynamic/@bk_inf_engine/enter_evidence.m, + BNT/inference/dynamic/@bk_inf_engine/marginal_family.m, + BNT/inference/dynamic/@bk_inf_engine/marginal_nodes.m, + BNT/inference/dynamic/@bk_inf_engine/update_engine.m, + BNT/inference/dynamic/@ff_inf_engine/enter_evidence.m, + BNT/inference/dynamic/@ff_inf_engine/enter_soft_evidence.m, + BNT/inference/dynamic/@ff_inf_engine/ff_inf_engine.m, + BNT/inference/dynamic/@ff_inf_engine/filter_evidence.m, + BNT/inference/dynamic/@ff_inf_engine/marginal_family.m, + BNT/inference/dynamic/@ff_inf_engine/marginal_nodes.m, + BNT/inference/dynamic/@ff_inf_engine/smooth_evidence.m, + BNT/inference/dynamic/@ff_inf_engine/Old/enter_soft_evidence.m, + BNT/inference/dynamic/@ff_inf_engine/Old/enter_soft_evidence1.m, + BNT/inference/dynamic/@ff_inf_engine/Old/marginal_family.m, + BNT/inference/dynamic/@frontier_inf_engine/enter_evidence.m, + BNT/inference/dynamic/@frontier_inf_engine/enter_soft_evidence.m, + BNT/inference/dynamic/@frontier_inf_engine/frontier_inf_engine.m, + 
BNT/inference/dynamic/@frontier_inf_engine/marginal_family.m, + BNT/inference/dynamic/@frontier_inf_engine/marginal_nodes.m, + BNT/inference/dynamic/@frontier_inf_engine/set_fwdback.m, + BNT/inference/dynamic/@hmm_inf_engine/update_engine.m, + BNT/inference/dynamic/@hmm_inf_engine/Old/dhmm_inf_engine.m, + BNT/inference/dynamic/@hmm_inf_engine/Old/marginal_family.m, + BNT/inference/dynamic/@hmm_inf_engine/Old/marginal_nodes.m, + BNT/inference/dynamic/@jtree_dbn_inf_engine/marginal_family.m, + BNT/inference/dynamic/@jtree_dbn_inf_engine/Broken/enter_soft_evidence1.m, + BNT/inference/dynamic/@jtree_dbn_inf_engine/Broken/enter_soft_evidence2.m, + BNT/inference/dynamic/@jtree_dbn_inf_engine/Broken/enter_soft_evidence3.m, + BNT/inference/dynamic/@jtree_dbn_inf_engine/Broken/enter_soft_evidence4.m, + BNT/inference/dynamic/@jtree_dbn_inf_engine/Broken/marginal_nodes.m, + BNT/inference/dynamic/@jtree_dbn_inf_engine/Old/enter_soft_evidence_nonint.m, + BNT/inference/dynamic/@jtree_dbn_inf_engine/Old/enter_soft_evidence_trans.m, + BNT/inference/dynamic/@jtree_dbn_inf_engine/Old/jtree_dbn_inf_engine.m, + BNT/inference/dynamic/@jtree_dbn_inf_engine/Old/jtree_dbn_inf_engine1.m, + BNT/inference/dynamic/@jtree_dbn_inf_engine/Old/jtree_dbn_inf_engine2.m, + BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/enter_evidence.m, + BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/jtree_unrolled_dbn_inf_engine.m, + BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/marginal_family.m, + BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/marginal_nodes.m, + BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/update_engine.m, + BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/Old/marginal_family.m, + BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/Old/marginal_nodes.m, + BNT/inference/dynamic/@kalman_inf_engine/enter_evidence.m, + BNT/inference/dynamic/@kalman_inf_engine/kalman_inf_engine.m, + BNT/inference/dynamic/@kalman_inf_engine/marginal_nodes.m, + BNT/inference/dynamic/@kalman_inf_engine/update_engine.m, + BNT/inference/dynamic/@kalman_inf_engine/private/dbn_to_lds.m, + BNT/inference/dynamic/@kalman_inf_engine/private/extract_params_from_gbn.m, + BNT/inference/dynamic/@pearl_dbn_inf_engine/enter_evidence.m, + BNT/inference/dynamic/@pearl_dbn_inf_engine/enter_soft_ev.m, + BNT/inference/dynamic/@pearl_dbn_inf_engine/marginal_nodes.m, + BNT/inference/dynamic/@pearl_dbn_inf_engine/pearl_dbn_inf_engine.m, + BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/correct_smooth.m, + BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/enter_evidence.m, + BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/filter_evidence.m, + BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/filter_evidence_obj_oriented.m, + BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/smooth_evidence.m, + BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/smooth_evidence_fast.m, + BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/wrong_smooth.m, + BNT/inference/dynamic/@pearl_dbn_inf_engine/private/init_pearl_dbn_ev_msgs.m, + BNT/inference/dynamic/@pearl_unrolled_dbn_inf_engine/enter_evidence.m, + BNT/inference/dynamic/@pearl_unrolled_dbn_inf_engine/marginal_family.m, + BNT/inference/dynamic/@pearl_unrolled_dbn_inf_engine/marginal_nodes.m, + BNT/inference/dynamic/@pearl_unrolled_dbn_inf_engine/pearl_unrolled_dbn_inf_engine.m, + BNT/inference/dynamic/@pearl_unrolled_dbn_inf_engine/update_engine.m, + BNT/inference/online/@filter_engine/bnet_from_engine.m, + BNT/inference/online/@filter_engine/enter_evidence.m, + 
BNT/inference/online/@filter_engine/filter_engine.m, + BNT/inference/online/@filter_engine/marginal_family.m, + BNT/inference/online/@filter_engine/marginal_nodes.m, + BNT/inference/online/@hmm_2TBN_inf_engine/back.m, + BNT/inference/online/@hmm_2TBN_inf_engine/backT.m, + BNT/inference/online/@hmm_2TBN_inf_engine/fwd.m, + BNT/inference/online/@hmm_2TBN_inf_engine/fwd1.m, + BNT/inference/online/@hmm_2TBN_inf_engine/update_engine.m, + BNT/inference/online/@jtree_2TBN_inf_engine/marginal_family.m, + BNT/inference/online/@jtree_2TBN_inf_engine/marginal_nodes.m, + BNT/inference/online/@jtree_2TBN_inf_engine/Old/jtree_2TBN_inf_engine.m, + BNT/inference/online/@jtree_sparse_2TBN_inf_engine/back.m, + BNT/inference/online/@jtree_sparse_2TBN_inf_engine/back1.m, + BNT/inference/online/@jtree_sparse_2TBN_inf_engine/backT.m, + BNT/inference/online/@jtree_sparse_2TBN_inf_engine/enter_evidence.m, + BNT/inference/online/@jtree_sparse_2TBN_inf_engine/fwd.m, + BNT/inference/online/@jtree_sparse_2TBN_inf_engine/fwd1.m, + BNT/inference/online/@jtree_sparse_2TBN_inf_engine/jtree_sparse_2TBN_inf_engine.m, + BNT/inference/online/@jtree_sparse_2TBN_inf_engine/marginal_family.m, + BNT/inference/online/@jtree_sparse_2TBN_inf_engine/marginal_nodes.m, + BNT/inference/online/@smoother_engine/bnet_from_engine.m, + BNT/inference/online/@smoother_engine/marginal_family.m, + BNT/inference/online/@smoother_engine/marginal_nodes.m, + BNT/inference/online/@smoother_engine/smoother_engine.m, + BNT/inference/online/@smoother_engine/update_engine.m, + BNT/inference/static/@belprop_fg_inf_engine/belprop_fg_inf_engine.m, + BNT/inference/static/@belprop_fg_inf_engine/enter_evidence.m, + BNT/inference/static/@belprop_fg_inf_engine/loopy_converged.m, + BNT/inference/static/@belprop_fg_inf_engine/marginal_nodes.m, + BNT/inference/static/@belprop_fg_inf_engine/set_params.m, + BNT/inference/static/@belprop_inf_engine/enter_evidence.m, + BNT/inference/static/@belprop_inf_engine/loopy_converged.m, + BNT/inference/static/@belprop_inf_engine/marginal_family.m, + BNT/inference/static/@belprop_inf_engine/marginal_nodes.m, + BNT/inference/static/@belprop_inf_engine/Old/belprop_gdl_inf_engine.m, + BNT/inference/static/@belprop_inf_engine/Old/belprop_inf_engine_nostr.m, + BNT/inference/static/@belprop_inf_engine/Old/enter_evidence.m, + BNT/inference/static/@belprop_inf_engine/Old/enter_evidence1.m, + BNT/inference/static/@belprop_inf_engine/Old/marginal_domain.m, + BNT/inference/static/@belprop_inf_engine/private/junk, + BNT/inference/static/@belprop_inf_engine/private/parallel_protocol.m, + BNT/inference/static/@belprop_inf_engine/private/tree_protocol.m, + BNT/inference/static/@cond_gauss_inf_engine/cond_gauss_inf_engine.m, + BNT/inference/static/@cond_gauss_inf_engine/enter_evidence.m, + BNT/inference/static/@cond_gauss_inf_engine/marginal_nodes.m, + BNT/inference/static/@enumerative_inf_engine/enter_evidence.m, + BNT/inference/static/@enumerative_inf_engine/enumerative_inf_engine.m, + BNT/inference/static/@enumerative_inf_engine/marginal_nodes.m, + BNT/inference/static/@gaussian_inf_engine/enter_evidence.m, + BNT/inference/static/@gaussian_inf_engine/marginal_nodes.m, + BNT/inference/static/@gaussian_inf_engine/private/extract_params_from_gbn.m, + BNT/inference/static/@gibbs_sampling_inf_engine/enter_evidence.m, + BNT/inference/static/@gibbs_sampling_inf_engine/gibbs_sampling_inf_engine.m, + BNT/inference/static/@gibbs_sampling_inf_engine/marginal_nodes.m, + BNT/inference/static/@gibbs_sampling_inf_engine/private/CPT.m, + 
BNT/inference/static/@gibbs_sampling_inf_engine/private/compute_children.m, + BNT/inference/static/@gibbs_sampling_inf_engine/private/compute_families.m, + BNT/inference/static/@gibbs_sampling_inf_engine/private/compute_families_dbn.m, + BNT/inference/static/@gibbs_sampling_inf_engine/private/compute_posterior.c, + BNT/inference/static/@gibbs_sampling_inf_engine/private/compute_posterior_dbn.m, + BNT/inference/static/@gibbs_sampling_inf_engine/private/compute_strides.m, + BNT/inference/static/@gibbs_sampling_inf_engine/private/get_cpts.m, + BNT/inference/static/@gibbs_sampling_inf_engine/private/get_slice_dbn.c, + BNT/inference/static/@gibbs_sampling_inf_engine/private/get_slice_dbn.m, + BNT/inference/static/@gibbs_sampling_inf_engine/private/my_sample_discrete.m, + BNT/inference/static/@gibbs_sampling_inf_engine/private/sample_single_discrete.c, + BNT/inference/static/@global_joint_inf_engine/global_joint_inf_engine.m, + BNT/inference/static/@global_joint_inf_engine/marginal_family.m, + BNT/inference/static/@jtree_inf_engine/cliques_from_engine.m, + BNT/inference/static/@jtree_inf_engine/clq_containing_nodes.m, + BNT/inference/static/@jtree_inf_engine/collect_evidence.m, + BNT/inference/static/@jtree_inf_engine/enter_soft_evidence.m, + BNT/inference/static/@jtree_inf_engine/marginal_family.m, + BNT/inference/static/@jtree_inf_engine/marginal_nodes.m, + BNT/inference/static/@jtree_inf_engine/set_fields.m, + BNT/inference/static/@jtree_inf_engine/Old/collect_evidence.m, + BNT/inference/static/@jtree_inf_engine/Old/distribute_evidence.m, + BNT/inference/static/@jtree_inf_engine/Old/enter_evidence.m, + BNT/inference/static/@jtree_inf_engine/Old/enter_soft_evidence.m, + BNT/inference/static/@jtree_limid_inf_engine/enter_evidence.m, + BNT/inference/static/@jtree_limid_inf_engine/jtree_limid_inf_engine.m, + BNT/inference/static/@jtree_limid_inf_engine/marginal_family.m, + BNT/inference/static/@jtree_limid_inf_engine/marginal_nodes.m, + BNT/inference/static/@jtree_limid_inf_engine/Old/marginal_family.m, + BNT/inference/static/@jtree_limid_inf_engine/Old/marginal_nodes_SS.m, + BNT/inference/static/@jtree_sparse_inf_engine/cliques_from_engine.m, + BNT/inference/static/@jtree_sparse_inf_engine/clq_containing_nodes.m, + BNT/inference/static/@jtree_sparse_inf_engine/collect_evidence.c, + BNT/inference/static/@jtree_sparse_inf_engine/distribute_evidence.c, + BNT/inference/static/@jtree_sparse_inf_engine/enter_evidence.m, + BNT/inference/static/@jtree_sparse_inf_engine/enter_soft_evidence.m, + BNT/inference/static/@jtree_sparse_inf_engine/init_pot.c, + BNT/inference/static/@jtree_sparse_inf_engine/marginal_family.m, + BNT/inference/static/@jtree_sparse_inf_engine/marginal_nodes.m, + BNT/inference/static/@jtree_sparse_inf_engine/set_fields.m, + BNT/inference/static/@jtree_sparse_inf_engine/old/collect_evidence.c, + BNT/inference/static/@jtree_sparse_inf_engine/old/distribute_evidence.c, + BNT/inference/static/@jtree_sparse_inf_engine/old/init_pot.c, + BNT/inference/static/@jtree_sparse_inf_engine/old/init_pot1.c, + BNT/inference/static/@jtree_sparse_inf_engine/old/init_pot1.m, + BNT/inference/static/@likelihood_weighting_inf_engine/enter_evidence.m, + BNT/inference/static/@likelihood_weighting_inf_engine/likelihood_weighting_inf_engine.m, + BNT/inference/static/@likelihood_weighting_inf_engine/marginal_nodes.m, + BNT/inference/static/@pearl_inf_engine/enter_evidence.m, + BNT/inference/static/@pearl_inf_engine/loopy_converged.m, + BNT/inference/static/@pearl_inf_engine/marginal_nodes.m, + 
BNT/inference/static/@pearl_inf_engine/private/compute_bel.m, + BNT/inference/static/@pearl_inf_engine/private/prod_lambda_msgs.m, + BNT/inference/static/@pearl_inf_engine/private/tree_protocol.m, + BNT/inference/static/@quickscore_inf_engine/enter_evidence.m, + BNT/inference/static/@quickscore_inf_engine/marginal_nodes.m, + BNT/inference/static/@quickscore_inf_engine/quickscore_inf_engine.m, + BNT/inference/static/@quickscore_inf_engine/private/C_quickscore.c, + BNT/inference/static/@quickscore_inf_engine/private/nr.h, + BNT/inference/static/@quickscore_inf_engine/private/nrutil.c, + BNT/inference/static/@quickscore_inf_engine/private/nrutil.h, + BNT/inference/static/@quickscore_inf_engine/private/quickscore.m, + BNT/learning/bayes_update_params.m, + BNT/learning/bic_score_family.m, + BNT/learning/compute_cooling_schedule.m, + BNT/learning/dirichlet_score_family.m, + BNT/learning/kpm_learn_struct_mcmc.m, + BNT/learning/learn_params_em.m, + BNT/learning/learn_struct_dbn_reveal.m, + BNT/learning/learn_struct_pdag_ic_star.m, + BNT/learning/mcmc_sample_to_hist.m, BNT/learning/mk_schedule.m, + BNT/learning/mk_tetrad_data_file.m, + BNT/learning/score_dags_old.m, HMM/dhmm_logprob_brute_force.m, + HMM/dhmm_logprob_path.m, HMM/mdp_sample.m, Kalman/AR_to_SS.m, + Kalman/SS_to_AR.m, Kalman/convert_to_lagged_form.m, + Kalman/ensure_AR.m, Kalman/eval_AR_perf.m, + Kalman/kalman_filter.m, Kalman/kalman_smoother.m, + Kalman/kalman_update.m, Kalman/learn_AR.m, + Kalman/learn_AR_diagonal.m, Kalman/learn_kalman.m, + Kalman/smooth_update.m, + BNT/general/convert_dbn_CPDs_to_tables_slow.m, + BNT/general/dispcpt.m, BNT/general/linear_gaussian_to_cpot.m, + BNT/general/partition_matrix_vec_3.m, + BNT/general/shrink_obs_dims_in_gaussian.m, + BNT/general/shrink_obs_dims_in_table.m, + BNT/potentials/CPD_to_pot.m, BNT/potentials/README, + BNT/potentials/check_for_cd_arcs.m, + BNT/potentials/determine_pot_type.m, + BNT/potentials/mk_initial_pot.m, + BNT/potentials/@cgpot/cg_can_to_mom.m, + BNT/potentials/@cgpot/cg_mom_to_can.m, + BNT/potentials/@cgpot/cgpot.m, BNT/potentials/@cgpot/display.m, + BNT/potentials/@cgpot/divide_by_pot.m, + BNT/potentials/@cgpot/domain_pot.m, + BNT/potentials/@cgpot/enter_cts_evidence_pot.m, + BNT/potentials/@cgpot/enter_discrete_evidence_pot.m, + BNT/potentials/@cgpot/marginalize_pot.m, + BNT/potentials/@cgpot/multiply_by_pot.m, + BNT/potentials/@cgpot/multiply_pots.m, + BNT/potentials/@cgpot/normalize_pot.m, + BNT/potentials/@cgpot/pot_to_marginal.m, + BNT/potentials/@cgpot/Old/normalize_pot.m, + BNT/potentials/@cgpot/Old/simple_marginalize_pot.m, + BNT/potentials/@cpot/cpot.m, BNT/potentials/@cpot/cpot_to_mpot.m, + BNT/potentials/@cpot/display.m, + BNT/potentials/@cpot/divide_by_pot.m, + BNT/potentials/@cpot/domain_pot.m, + BNT/potentials/@cpot/enter_cts_evidence_pot.m, + BNT/potentials/@cpot/marginalize_pot.m, + BNT/potentials/@cpot/multiply_by_pot.m, + BNT/potentials/@cpot/multiply_pots.m, + BNT/potentials/@cpot/normalize_pot.m, + BNT/potentials/@cpot/pot_to_marginal.m, + BNT/potentials/@cpot/rescale_pot.m, + BNT/potentials/@cpot/set_domain_pot.m, + BNT/potentials/@cpot/Old/cpot_to_mpot.m, + BNT/potentials/@cpot/Old/normalize_pot.convert.m, + BNT/potentials/@dpot/approxeq_pot.m, + BNT/potentials/@dpot/display.m, + BNT/potentials/@dpot/domain_pot.m, + BNT/potentials/@dpot/dpot_to_table.m, + BNT/potentials/@dpot/get_fields.m, + BNT/potentials/@dpot/multiply_pots.m, + BNT/potentials/@dpot/pot_to_marginal.m, + BNT/potentials/@dpot/set_domain_pot.m, + BNT/potentials/@mpot/display.m, + 
BNT/potentials/@mpot/marginalize_pot.m, + BNT/potentials/@mpot/mpot.m, BNT/potentials/@mpot/mpot_to_cpot.m, + BNT/potentials/@mpot/normalize_pot.m, + BNT/potentials/@mpot/pot_to_marginal.m, + BNT/potentials/@mpot/rescale_pot.m, + BNT/potentials/@upot/approxeq_pot.m, + BNT/potentials/@upot/display.m, + BNT/potentials/@upot/divide_by_pot.m, + BNT/potentials/@upot/marginalize_pot.m, + BNT/potentials/@upot/multiply_by_pot.m, + BNT/potentials/@upot/normalize_pot.m, + BNT/potentials/@upot/pot_to_marginal.m, + BNT/potentials/@upot/upot.m, + BNT/potentials/@upot/upot_to_opt_policy.m, + BNT/potentials/Old/comp_eff_node_sizes.m, + BNT/potentials/Tables/divide_by_sparse_table.c, + BNT/potentials/Tables/divide_by_table.c, + BNT/potentials/Tables/marg_sparse_table.c, + BNT/potentials/Tables/marg_table.c, + BNT/potentials/Tables/mult_by_sparse_table.c, + BNT/potentials/Tables/rep_mult.c, HMM/mk_leftright_transmat.m: + Initial import of code base from Kevin Murphy. + +2002-05-29 08:59 yozhik + + * BNT/@assocarray/assocarray.m, + BNT/CPDs/@boolean_CPD/boolean_CPD.m, + BNT/CPDs/@discrete_CPD/CPD_to_lambda_msg.m, + BNT/CPDs/@discrete_CPD/CPD_to_pi.m, + BNT/CPDs/@discrete_CPD/CPD_to_scgpot.m, + BNT/CPDs/@discrete_CPD/README, + BNT/CPDs/@discrete_CPD/convert_CPD_to_table_hidden_ps.m, + BNT/CPDs/@discrete_CPD/convert_obs_CPD_to_table.m, + BNT/CPDs/@discrete_CPD/convert_to_sparse_table.c, + BNT/CPDs/@discrete_CPD/convert_to_table.m, + BNT/CPDs/@discrete_CPD/discrete_CPD.m, + BNT/CPDs/@discrete_CPD/dom_sizes.m, + BNT/CPDs/@discrete_CPD/log_prob_node.m, + BNT/CPDs/@discrete_CPD/prob_node.m, + BNT/CPDs/@discrete_CPD/sample_node.m, + BNT/CPDs/@discrete_CPD/Old/convert_to_pot.m, + BNT/CPDs/@discrete_CPD/Old/convert_to_table.m, + BNT/CPDs/@discrete_CPD/Old/prob_CPD.m, + BNT/CPDs/@discrete_CPD/Old/prob_node.m, + BNT/CPDs/@discrete_CPD/private/prod_CPT_and_pi_msgs.m, + BNT/CPDs/@gaussian_CPD/CPD_to_lambda_msg.m, + BNT/CPDs/@gaussian_CPD/CPD_to_pi.m, + BNT/CPDs/@gaussian_CPD/CPD_to_scgpot.m, + BNT/CPDs/@gaussian_CPD/adjustable_CPD.m, + BNT/CPDs/@gaussian_CPD/convert_CPD_to_table_hidden_ps.m, + BNT/CPDs/@gaussian_CPD/display.m, + BNT/CPDs/@gaussian_CPD/get_field.m, + BNT/CPDs/@gaussian_CPD/reset_ess.m, + BNT/CPDs/@gaussian_CPD/sample_node.m, + BNT/CPDs/@gaussian_CPD/set_fields.m, + BNT/CPDs/@gaussian_CPD/Old/CPD_to_lambda_msg.m, + BNT/CPDs/@gaussian_CPD/Old/gaussian_CPD.m, + BNT/CPDs/@gaussian_CPD/Old/log_prob_node.m, + BNT/CPDs/@gaussian_CPD/Old/update_ess.m, + BNT/CPDs/@gaussian_CPD/Old/update_tied_ess.m, + BNT/CPDs/@gaussian_CPD/private/CPD_to_linear_gaussian.m, + BNT/CPDs/@generic_CPD/README, + BNT/CPDs/@generic_CPD/adjustable_CPD.m, + BNT/CPDs/@generic_CPD/display.m, + BNT/CPDs/@generic_CPD/generic_CPD.m, + BNT/CPDs/@generic_CPD/log_prior.m, + BNT/CPDs/@generic_CPD/set_clamped.m, + BNT/CPDs/@generic_CPD/Old/BIC_score_CPD.m, + BNT/CPDs/@generic_CPD/Old/CPD_to_dpots.m, + BNT/CPDs/@gmux_CPD/CPD_to_lambda_msg.m, + BNT/CPDs/@gmux_CPD/convert_to_pot.m, + BNT/CPDs/@gmux_CPD/CPD_to_pi.m, BNT/CPDs/@gmux_CPD/display.m, + BNT/CPDs/@gmux_CPD/gmux_CPD.m, BNT/CPDs/@gmux_CPD/sample_node.m, + BNT/CPDs/@gmux_CPD/Old/gmux_CPD.m, + BNT/CPDs/@hhmmF_CPD/log_prior.m, + BNT/CPDs/@hhmmF_CPD/maximize_params.m, + BNT/CPDs/@hhmmF_CPD/reset_ess.m, BNT/CPDs/@hhmmQ_CPD/log_prior.m, + BNT/CPDs/@hhmmQ_CPD/reset_ess.m, + BNT/CPDs/@mlp_CPD/convert_to_table.m, + BNT/CPDs/@mlp_CPD/maximize_params.m, BNT/CPDs/@mlp_CPD/mlp_CPD.m, + BNT/CPDs/@mlp_CPD/reset_ess.m, BNT/CPDs/@mlp_CPD/update_ess.m, + BNT/CPDs/@noisyor_CPD/CPD_to_lambda_msg.m, + 
BNT/CPDs/@noisyor_CPD/CPD_to_pi.m, + BNT/CPDs/@noisyor_CPD/noisyor_CPD.m, + BNT/CPDs/@noisyor_CPD/private/sum_prod_CPD_and_pi_msgs.m, + BNT/CPDs/@root_CPD/CPD_to_pi.m, + BNT/CPDs/@root_CPD/convert_to_pot.m, + BNT/CPDs/@root_CPD/log_marg_prob_node.m, + BNT/CPDs/@root_CPD/log_prob_node.m, + BNT/CPDs/@root_CPD/root_CPD.m, BNT/CPDs/@root_CPD/sample_node.m, + BNT/CPDs/@root_CPD/Old/CPD_to_CPT.m, + BNT/CPDs/@softmax_CPD/convert_to_pot.m, + BNT/CPDs/@softmax_CPD/display.m, + BNT/CPDs/@softmax_CPD/get_field.m, + BNT/CPDs/@softmax_CPD/maximize_params.m, + BNT/CPDs/@softmax_CPD/reset_ess.m, + BNT/CPDs/@softmax_CPD/sample_node.m, + BNT/CPDs/@softmax_CPD/set_fields.m, + BNT/CPDs/@softmax_CPD/update_ess.m, + BNT/CPDs/@softmax_CPD/private/extract_params.m, + BNT/CPDs/@tabular_CPD/CPD_to_CPT.m, + BNT/CPDs/@tabular_CPD/bayes_update_params.m, + BNT/CPDs/@tabular_CPD/log_nextcase_prob_node.m, + BNT/CPDs/@tabular_CPD/log_prior.m, + BNT/CPDs/@tabular_CPD/reset_ess.m, + BNT/CPDs/@tabular_CPD/update_ess.m, + BNT/CPDs/@tabular_CPD/update_ess_simple.m, + BNT/CPDs/@tabular_CPD/Old/BIC_score_CPD.m, + BNT/CPDs/@tabular_CPD/Old/bayesian_score_CPD.m, + BNT/CPDs/@tabular_CPD/Old/log_marg_prob_node_case.m, + BNT/CPDs/@tabular_CPD/Old/mult_CPD_and_pi_msgs.m, + BNT/CPDs/@tabular_CPD/Old/prob_CPT.m, + BNT/CPDs/@tabular_CPD/Old/prob_node.m, + BNT/CPDs/@tabular_CPD/Old/sample_node.m, + BNT/CPDs/@tabular_CPD/Old/sample_node_single_case.m, + BNT/CPDs/@tabular_CPD/Old/tabular_CPD.m, + BNT/CPDs/@tabular_CPD/Old/update_params.m, + BNT/CPDs/@tabular_decision_node/CPD_to_CPT.m, + BNT/CPDs/@tabular_decision_node/display.m, + BNT/CPDs/@tabular_decision_node/get_field.m, + BNT/CPDs/@tabular_decision_node/set_fields.m, + BNT/CPDs/@tabular_decision_node/tabular_decision_node.m, + BNT/CPDs/@tabular_decision_node/Old/tabular_decision_node.m, + BNT/CPDs/@tabular_kernel/convert_to_pot.m, + BNT/CPDs/@tabular_kernel/convert_to_table.m, + BNT/CPDs/@tabular_kernel/get_field.m, + BNT/CPDs/@tabular_kernel/set_fields.m, + BNT/CPDs/@tabular_kernel/tabular_kernel.m, + BNT/CPDs/@tabular_kernel/Old/tabular_kernel.m, + BNT/CPDs/@tabular_utility_node/convert_to_pot.m, + BNT/CPDs/@tabular_utility_node/display.m, + BNT/CPDs/@tabular_utility_node/tabular_utility_node.m, + BNT/CPDs/@tree_CPD/display.m, + BNT/CPDs/@tree_CPD/evaluate_tree_performance.m, + BNT/CPDs/@tree_CPD/get_field.m, + BNT/CPDs/@tree_CPD/learn_params.m, BNT/CPDs/@tree_CPD/readme.txt, + BNT/CPDs/@tree_CPD/set_fields.m, BNT/CPDs/@tree_CPD/tree_CPD.m, + BNT/CPDs/Old/@linear_gaussian_CPD/linear_gaussian_CPD.m, + BNT/CPDs/Old/@linear_gaussian_CPD/log_marg_prob_node.m, + BNT/CPDs/Old/@linear_gaussian_CPD/update_params_complete.m, + BNT/CPDs/Old/@root_gaussian_CPD/log_marg_prob_node.m, + BNT/CPDs/Old/@root_gaussian_CPD/root_gaussian_CPD.m, + BNT/CPDs/Old/@root_gaussian_CPD/update_params_complete.m, + BNT/CPDs/Old/@tabular_chance_node/CPD_to_upot.m, + BNT/CPDs/Old/@tabular_chance_node/tabular_chance_node.m, + BNT/examples/dynamic/bat1.m, BNT/examples/dynamic/bkff1.m, + BNT/examples/dynamic/chmm1.m, + BNT/examples/dynamic/cmp_inference_dbn.m, + BNT/examples/dynamic/cmp_learning_dbn.m, + BNT/examples/dynamic/cmp_online_inference.m, + BNT/examples/dynamic/fhmm_infer.m, + BNT/examples/dynamic/filter_test1.m, + BNT/examples/dynamic/kalman1.m, + BNT/examples/dynamic/kjaerulff1.m, + BNT/examples/dynamic/loopy_dbn1.m, + BNT/examples/dynamic/mk_collage_from_clqs.m, + BNT/examples/dynamic/mk_fhmm.m, BNT/examples/dynamic/reveal1.m, + BNT/examples/dynamic/scg_dbn.m, + 
BNT/examples/dynamic/skf_data_assoc_gmux.m, + BNT/examples/dynamic/HHMM/add_hhmm_end_state.m, + BNT/examples/dynamic/HHMM/hhmm_jtree_clqs.m, + BNT/examples/dynamic/HHMM/mk_hhmm_topo.m, + BNT/examples/dynamic/HHMM/mk_hhmm_topo_F1.m, + BNT/examples/dynamic/HHMM/pretty_print_hhmm_parse.m, + BNT/examples/dynamic/HHMM/Motif/fixed_args_mk_motif_hhmm.m, + BNT/examples/dynamic/HHMM/Motif/mk_motif_hhmm.m, + BNT/examples/dynamic/HHMM/Motif/sample_motif_hhmm.m, + BNT/examples/dynamic/HHMM/Old/mk_abcd_hhmm.m, + BNT/examples/dynamic/HHMM/Old/mk_arrow_alpha_hhmm3.m, + BNT/examples/dynamic/HHMM/Old/mk_hhmm2.m, + BNT/examples/dynamic/HHMM/Old/mk_hhmm3.m, + BNT/examples/dynamic/HHMM/Old/mk_hhmm3_args.m, + BNT/examples/dynamic/HHMM/Old/motif_hhmm.m, + BNT/examples/dynamic/HHMM/Old/remove_hhmm_end_state.m, + BNT/examples/dynamic/HHMM/Square/get_square_data.m, + BNT/examples/dynamic/HHMM/Square/hhmm_inference.m, + BNT/examples/dynamic/HHMM/Square/is_F2_true_D3.m, + BNT/examples/dynamic/HHMM/Square/learn_square_hhmm_discrete.m, + BNT/examples/dynamic/HHMM/Square/mk_square_hhmm.m, + BNT/examples/dynamic/HHMM/Square/plot_square_hhmm.m, + BNT/examples/dynamic/HHMM/Square/sample_square_hhmm_cts.m, + BNT/examples/dynamic/HHMM/Square/sample_square_hhmm_discrete.m, + BNT/examples/dynamic/HHMM/Square/square4.mat, + BNT/examples/dynamic/HHMM/Square/square4_cases.mat, + BNT/examples/dynamic/HHMM/Square/test_square_fig.m, + BNT/examples/dynamic/HHMM/Square/test_square_fig.mat, + BNT/examples/dynamic/HHMM/Square/Old/learn_square_hhmm.m, + BNT/examples/dynamic/HHMM/Square/Old/mk_square_hhmm.m, + BNT/examples/dynamic/HHMM/Square/Old/plot_square_hhmm.m, + BNT/examples/dynamic/HHMM/Square/Old/sample_square_hhmm.m, + BNT/examples/dynamic/Old/chmm1.m, + BNT/examples/dynamic/Old/cmp_inference.m, + BNT/examples/dynamic/Old/kalman1.m, + BNT/examples/dynamic/Old/old.water1.m, + BNT/examples/dynamic/Old/online1.m, + BNT/examples/dynamic/Old/online2.m, + BNT/examples/dynamic/Old/scg_dbn.m, + BNT/examples/dynamic/SLAM/mk_gmux_robot_dbn.m, + BNT/examples/dynamic/SLAM/mk_linear_slam.m, + BNT/examples/dynamic/SLAM/slam_kf.m, + BNT/examples/dynamic/SLAM/slam_offline_loopy.m, + BNT/examples/dynamic/SLAM/slam_partial_kf.m, + BNT/examples/dynamic/SLAM/slam_stationary_loopy.m, + BNT/examples/dynamic/SLAM/Old/offline_loopy_slam.m, + BNT/examples/dynamic/SLAM/Old/paskin1.m, + BNT/examples/dynamic/SLAM/Old/skf_data_assoc_gmux2.m, + BNT/examples/dynamic/SLAM/Old/slam_kf.m, + BNT/examples/limids/id1.m, BNT/examples/limids/pigs1.m, + BNT/examples/static/cg1.m, BNT/examples/static/cg2.m, + BNT/examples/static/discrete2.m, BNT/examples/static/discrete3.m, + BNT/examples/static/fa1.m, BNT/examples/static/gaussian1.m, + BNT/examples/static/gibbs_test1.m, BNT/examples/static/lw1.m, + BNT/examples/static/mfa1.m, BNT/examples/static/mixexp1.m, + BNT/examples/static/mixexp2.m, BNT/examples/static/mixexp3.m, + BNT/examples/static/mog1.m, BNT/examples/static/qmr1.m, + BNT/examples/static/sample1.m, BNT/examples/static/softmax1.m, + BNT/examples/static/Belprop/belprop_loop1_discrete.m, + BNT/examples/static/Belprop/belprop_loop1_gauss.m, + BNT/examples/static/Belprop/belprop_loopy_cg.m, + BNT/examples/static/Belprop/belprop_loopy_discrete.m, + BNT/examples/static/Belprop/belprop_loopy_gauss.m, + BNT/examples/static/Belprop/belprop_polytree_cg.m, + BNT/examples/static/Belprop/belprop_polytree_gauss.m, + BNT/examples/static/Belprop/bp1.m, + BNT/examples/static/Belprop/gmux1.m, + BNT/examples/static/Brutti/Belief_IOhmm.m, + 
BNT/examples/static/Brutti/Belief_hmdt.m, + BNT/examples/static/Brutti/Belief_hme.m, + BNT/examples/static/Brutti/Sigmoid_Belief.m, + BNT/examples/static/HME/HMEforMatlab.jpg, + BNT/examples/static/HME/README, BNT/examples/static/HME/fhme.m, + BNT/examples/static/HME/gen_data.m, + BNT/examples/static/HME/hme_class_plot.m, + BNT/examples/static/HME/hme_reg_plot.m, + BNT/examples/static/HME/hme_topobuilder.m, + BNT/examples/static/HME/test_data_class.mat, + BNT/examples/static/HME/test_data_class2.mat, + BNT/examples/static/HME/test_data_reg.mat, + BNT/examples/static/HME/train_data_class.mat, + BNT/examples/static/HME/train_data_reg.mat, + BNT/examples/static/Misc/mixexp_data.txt, + BNT/examples/static/Misc/mixexp_graddesc.m, + BNT/examples/static/Misc/mixexp_plot.m, + BNT/examples/static/Misc/sprinkler.bif, + BNT/examples/static/Models/mk_cancer_bnet.m, + BNT/examples/static/Models/mk_car_bnet.m, + BNT/examples/static/Models/mk_ideker_bnet.m, + BNT/examples/static/Models/mk_incinerator_bnet.m, + BNT/examples/static/Models/mk_markov_chain_bnet.m, + BNT/examples/static/Models/mk_minimal_qmr_bnet.m, + BNT/examples/static/Models/mk_qmr_bnet.m, + BNT/examples/static/Models/mk_vstruct_bnet.m, + BNT/examples/static/Models/Old/mk_hmm_bnet.m, + BNT/examples/static/SCG/scg1.m, BNT/examples/static/SCG/scg2.m, + BNT/examples/static/SCG/scg3.m, + BNT/examples/static/SCG/scg_3node.m, + BNT/examples/static/SCG/scg_unstable.m, + BNT/examples/static/StructLearn/bic1.m, + BNT/examples/static/StructLearn/cooper_yoo.m, + BNT/examples/static/StructLearn/k2demo1.m, + BNT/examples/static/StructLearn/mcmc1.m, + BNT/examples/static/StructLearn/pc1.m, + BNT/examples/static/StructLearn/pc2.m, + BNT/examples/static/Zoubin/README, + BNT/examples/static/Zoubin/csum.m, + BNT/examples/static/Zoubin/ffa.m, + BNT/examples/static/Zoubin/mfa.m, + BNT/examples/static/Zoubin/mfa_cl.m, + BNT/examples/static/Zoubin/mfademo.m, + BNT/examples/static/Zoubin/rdiv.m, + BNT/examples/static/Zoubin/rprod.m, + BNT/examples/static/Zoubin/rsum.m, + BNT/examples/static/dtree/test_housing.m, + BNT/examples/static/dtree/test_restaurants.m, + BNT/examples/static/dtree/test_zoo1.m, + BNT/examples/static/dtree/tmp.dot, + BNT/examples/static/dtree/transform_data_into_bnt_format.m, + BNT/examples/static/fgraph/fg2.m, + BNT/examples/static/fgraph/fg3.m, + BNT/examples/static/fgraph/fg_mrf1.m, + BNT/examples/static/fgraph/fg_mrf2.m, + BNT/general/bnet_to_fgraph.m, + BNT/general/compute_fwd_interface.m, + BNT/general/compute_interface_nodes.m, + BNT/general/compute_minimal_interface.m, + BNT/general/dbn_to_bnet.m, + BNT/general/determine_elim_constraints.m, + BNT/general/do_intervention.m, BNT/general/dsep.m, + BNT/general/enumerate_scenarios.m, BNT/general/fgraph_to_bnet.m, + BNT/general/log_lik_complete.m, + BNT/general/log_marg_lik_complete.m, BNT/general/mk_bnet.m, + BNT/general/mk_fgraph.m, BNT/general/mk_limid.m, + BNT/general/mk_mutilated_samples.m, + BNT/general/mk_slice_and_half_dbn.m, + BNT/general/partition_dbn_nodes.m, + BNT/general/sample_bnet_nocell.m, BNT/general/sample_dbn.m, + BNT/general/score_bnet_complete.m, + BNT/general/unroll_dbn_topology.m, + BNT/general/Old/bnet_to_gdl_graph.m, + BNT/general/Old/calc_mpe_bucket.m, + BNT/general/Old/calc_mpe_dbn.m, + BNT/general/Old/calc_mpe_given_inf_engine.m, + BNT/general/Old/calc_mpe_global.m, + BNT/general/Old/compute_interface_nodes.m, + BNT/general/Old/mk_gdl_graph.m, GraphViz/draw_dbn.m, + GraphViz/make_layout.m, BNT/license.gpl.txt, + BNT/general/add_evidence_to_gmarginal.m, + 
BNT/inference/@inf_engine/bnet_from_engine.m, + BNT/inference/@inf_engine/get_field.m, + BNT/inference/@inf_engine/inf_engine.m, + BNT/inference/@inf_engine/marginal_family.m, + BNT/inference/@inf_engine/set_fields.m, + BNT/inference/@inf_engine/update_engine.m, + BNT/inference/@inf_engine/Old/marginal_family_pot.m, + BNT/inference/@inf_engine/Old/observed_nodes.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/bk_ff_hmm_inf_engine.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_init_bel.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_marginal_from_bel.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_predict_bel.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_update_bel.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/dbn_update_bel1.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/enter_evidence.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/marginal_family.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/marginal_nodes.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/bk_ff_fb.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/combine_marginals_into_joint.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/dbn_to_hmm.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/mk_hmm_obs_lik_mat.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/mk_hmm_obs_lik_vec.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/mk_hmm_obs_lik_vec1.m, + BNT/inference/dynamic/@bk_ff_hmm_inf_engine/private/project_joint_onto_marginals.m, + BNT/inference/dynamic/@bk_inf_engine/bk_inf_engine.m, + BNT/inference/dynamic/@bk_inf_engine/dbn_init_bel.m, + BNT/inference/dynamic/@bk_inf_engine/dbn_marginal_from_bel.m, + BNT/inference/dynamic/@bk_inf_engine/dbn_update_bel.m, + BNT/inference/dynamic/@bk_inf_engine/dbn_update_bel1.m, + BNT/inference/dynamic/@bk_inf_engine/enter_evidence.m, + BNT/inference/dynamic/@bk_inf_engine/marginal_family.m, + BNT/inference/dynamic/@bk_inf_engine/marginal_nodes.m, + BNT/inference/dynamic/@bk_inf_engine/update_engine.m, + BNT/inference/dynamic/@ff_inf_engine/enter_evidence.m, + BNT/inference/dynamic/@ff_inf_engine/enter_soft_evidence.m, + BNT/inference/dynamic/@ff_inf_engine/ff_inf_engine.m, + BNT/inference/dynamic/@ff_inf_engine/filter_evidence.m, + BNT/inference/dynamic/@ff_inf_engine/marginal_family.m, + BNT/inference/dynamic/@ff_inf_engine/marginal_nodes.m, + BNT/inference/dynamic/@ff_inf_engine/smooth_evidence.m, + BNT/inference/dynamic/@ff_inf_engine/Old/enter_soft_evidence.m, + BNT/inference/dynamic/@ff_inf_engine/Old/enter_soft_evidence1.m, + BNT/inference/dynamic/@ff_inf_engine/Old/marginal_family.m, + BNT/inference/dynamic/@frontier_inf_engine/enter_evidence.m, + BNT/inference/dynamic/@frontier_inf_engine/enter_soft_evidence.m, + BNT/inference/dynamic/@frontier_inf_engine/frontier_inf_engine.m, + BNT/inference/dynamic/@frontier_inf_engine/marginal_family.m, + BNT/inference/dynamic/@frontier_inf_engine/marginal_nodes.m, + BNT/inference/dynamic/@frontier_inf_engine/set_fwdback.m, + BNT/inference/dynamic/@hmm_inf_engine/update_engine.m, + BNT/inference/dynamic/@hmm_inf_engine/Old/dhmm_inf_engine.m, + BNT/inference/dynamic/@hmm_inf_engine/Old/marginal_family.m, + BNT/inference/dynamic/@hmm_inf_engine/Old/marginal_nodes.m, + BNT/inference/dynamic/@jtree_dbn_inf_engine/marginal_family.m, + BNT/inference/dynamic/@jtree_dbn_inf_engine/Broken/enter_soft_evidence1.m, + BNT/inference/dynamic/@jtree_dbn_inf_engine/Broken/enter_soft_evidence2.m, + BNT/inference/dynamic/@jtree_dbn_inf_engine/Broken/enter_soft_evidence3.m, + 
BNT/inference/dynamic/@jtree_dbn_inf_engine/Broken/enter_soft_evidence4.m, + BNT/inference/dynamic/@jtree_dbn_inf_engine/Broken/marginal_nodes.m, + BNT/inference/dynamic/@jtree_dbn_inf_engine/Old/enter_soft_evidence_nonint.m, + BNT/inference/dynamic/@jtree_dbn_inf_engine/Old/enter_soft_evidence_trans.m, + BNT/inference/dynamic/@jtree_dbn_inf_engine/Old/jtree_dbn_inf_engine.m, + BNT/inference/dynamic/@jtree_dbn_inf_engine/Old/jtree_dbn_inf_engine1.m, + BNT/inference/dynamic/@jtree_dbn_inf_engine/Old/jtree_dbn_inf_engine2.m, + BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/enter_evidence.m, + BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/jtree_unrolled_dbn_inf_engine.m, + BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/marginal_family.m, + BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/marginal_nodes.m, + BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/update_engine.m, + BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/Old/marginal_family.m, + BNT/inference/dynamic/@jtree_unrolled_dbn_inf_engine/Old/marginal_nodes.m, + BNT/inference/dynamic/@kalman_inf_engine/enter_evidence.m, + BNT/inference/dynamic/@kalman_inf_engine/kalman_inf_engine.m, + BNT/inference/dynamic/@kalman_inf_engine/marginal_nodes.m, + BNT/inference/dynamic/@kalman_inf_engine/update_engine.m, + BNT/inference/dynamic/@kalman_inf_engine/private/dbn_to_lds.m, + BNT/inference/dynamic/@kalman_inf_engine/private/extract_params_from_gbn.m, + BNT/inference/dynamic/@pearl_dbn_inf_engine/enter_evidence.m, + BNT/inference/dynamic/@pearl_dbn_inf_engine/enter_soft_ev.m, + BNT/inference/dynamic/@pearl_dbn_inf_engine/marginal_nodes.m, + BNT/inference/dynamic/@pearl_dbn_inf_engine/pearl_dbn_inf_engine.m, + BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/correct_smooth.m, + BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/enter_evidence.m, + BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/filter_evidence.m, + BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/filter_evidence_obj_oriented.m, + BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/smooth_evidence.m, + BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/smooth_evidence_fast.m, + BNT/inference/dynamic/@pearl_dbn_inf_engine/Old/wrong_smooth.m, + BNT/inference/dynamic/@pearl_dbn_inf_engine/private/init_pearl_dbn_ev_msgs.m, + BNT/inference/dynamic/@pearl_unrolled_dbn_inf_engine/enter_evidence.m, + BNT/inference/dynamic/@pearl_unrolled_dbn_inf_engine/marginal_family.m, + BNT/inference/dynamic/@pearl_unrolled_dbn_inf_engine/marginal_nodes.m, + BNT/inference/dynamic/@pearl_unrolled_dbn_inf_engine/pearl_unrolled_dbn_inf_engine.m, + BNT/inference/dynamic/@pearl_unrolled_dbn_inf_engine/update_engine.m, + BNT/inference/online/@filter_engine/bnet_from_engine.m, + BNT/inference/online/@filter_engine/enter_evidence.m, + BNT/inference/online/@filter_engine/filter_engine.m, + BNT/inference/online/@filter_engine/marginal_family.m, + BNT/inference/online/@filter_engine/marginal_nodes.m, + BNT/inference/online/@hmm_2TBN_inf_engine/back.m, + BNT/inference/online/@hmm_2TBN_inf_engine/backT.m, + BNT/inference/online/@hmm_2TBN_inf_engine/fwd.m, + BNT/inference/online/@hmm_2TBN_inf_engine/fwd1.m, + BNT/inference/online/@hmm_2TBN_inf_engine/update_engine.m, + BNT/inference/online/@jtree_2TBN_inf_engine/marginal_family.m, + BNT/inference/online/@jtree_2TBN_inf_engine/marginal_nodes.m, + BNT/inference/online/@jtree_2TBN_inf_engine/Old/jtree_2TBN_inf_engine.m, + BNT/inference/online/@jtree_sparse_2TBN_inf_engine/back.m, + BNT/inference/online/@jtree_sparse_2TBN_inf_engine/back1.m, + 
BNT/inference/online/@jtree_sparse_2TBN_inf_engine/backT.m, + BNT/inference/online/@jtree_sparse_2TBN_inf_engine/enter_evidence.m, + BNT/inference/online/@jtree_sparse_2TBN_inf_engine/fwd.m, + BNT/inference/online/@jtree_sparse_2TBN_inf_engine/fwd1.m, + BNT/inference/online/@jtree_sparse_2TBN_inf_engine/jtree_sparse_2TBN_inf_engine.m, + BNT/inference/online/@jtree_sparse_2TBN_inf_engine/marginal_family.m, + BNT/inference/online/@jtree_sparse_2TBN_inf_engine/marginal_nodes.m, + BNT/inference/online/@smoother_engine/bnet_from_engine.m, + BNT/inference/online/@smoother_engine/marginal_family.m, + BNT/inference/online/@smoother_engine/marginal_nodes.m, + BNT/inference/online/@smoother_engine/smoother_engine.m, + BNT/inference/online/@smoother_engine/update_engine.m, + BNT/inference/static/@belprop_fg_inf_engine/belprop_fg_inf_engine.m, + BNT/inference/static/@belprop_fg_inf_engine/enter_evidence.m, + BNT/inference/static/@belprop_fg_inf_engine/loopy_converged.m, + BNT/inference/static/@belprop_fg_inf_engine/marginal_nodes.m, + BNT/inference/static/@belprop_fg_inf_engine/set_params.m, + BNT/inference/static/@belprop_inf_engine/enter_evidence.m, + BNT/inference/static/@belprop_inf_engine/loopy_converged.m, + BNT/inference/static/@belprop_inf_engine/marginal_family.m, + BNT/inference/static/@belprop_inf_engine/marginal_nodes.m, + BNT/inference/static/@belprop_inf_engine/Old/belprop_gdl_inf_engine.m, + BNT/inference/static/@belprop_inf_engine/Old/belprop_inf_engine_nostr.m, + BNT/inference/static/@belprop_inf_engine/Old/enter_evidence.m, + BNT/inference/static/@belprop_inf_engine/Old/enter_evidence1.m, + BNT/inference/static/@belprop_inf_engine/Old/marginal_domain.m, + BNT/inference/static/@belprop_inf_engine/private/junk, + BNT/inference/static/@belprop_inf_engine/private/parallel_protocol.m, + BNT/inference/static/@belprop_inf_engine/private/tree_protocol.m, + BNT/inference/static/@cond_gauss_inf_engine/cond_gauss_inf_engine.m, + BNT/inference/static/@cond_gauss_inf_engine/enter_evidence.m, + BNT/inference/static/@cond_gauss_inf_engine/marginal_nodes.m, + BNT/inference/static/@enumerative_inf_engine/enter_evidence.m, + BNT/inference/static/@enumerative_inf_engine/enumerative_inf_engine.m, + BNT/inference/static/@enumerative_inf_engine/marginal_nodes.m, + BNT/inference/static/@gaussian_inf_engine/enter_evidence.m, + BNT/inference/static/@gaussian_inf_engine/marginal_nodes.m, + BNT/inference/static/@gaussian_inf_engine/private/extract_params_from_gbn.m, + BNT/inference/static/@gibbs_sampling_inf_engine/enter_evidence.m, + BNT/inference/static/@gibbs_sampling_inf_engine/gibbs_sampling_inf_engine.m, + BNT/inference/static/@gibbs_sampling_inf_engine/marginal_nodes.m, + BNT/inference/static/@gibbs_sampling_inf_engine/private/CPT.m, + BNT/inference/static/@gibbs_sampling_inf_engine/private/compute_children.m, + BNT/inference/static/@gibbs_sampling_inf_engine/private/compute_families.m, + BNT/inference/static/@gibbs_sampling_inf_engine/private/compute_families_dbn.m, + BNT/inference/static/@gibbs_sampling_inf_engine/private/compute_posterior.c, + BNT/inference/static/@gibbs_sampling_inf_engine/private/compute_posterior_dbn.m, + BNT/inference/static/@gibbs_sampling_inf_engine/private/compute_strides.m, + BNT/inference/static/@gibbs_sampling_inf_engine/private/get_cpts.m, + BNT/inference/static/@gibbs_sampling_inf_engine/private/get_slice_dbn.c, + BNT/inference/static/@gibbs_sampling_inf_engine/private/get_slice_dbn.m, + BNT/inference/static/@gibbs_sampling_inf_engine/private/my_sample_discrete.m, + 
BNT/inference/static/@gibbs_sampling_inf_engine/private/sample_single_discrete.c, + BNT/inference/static/@global_joint_inf_engine/global_joint_inf_engine.m, + BNT/inference/static/@global_joint_inf_engine/marginal_family.m, + BNT/inference/static/@jtree_inf_engine/cliques_from_engine.m, + BNT/inference/static/@jtree_inf_engine/clq_containing_nodes.m, + BNT/inference/static/@jtree_inf_engine/collect_evidence.m, + BNT/inference/static/@jtree_inf_engine/enter_soft_evidence.m, + BNT/inference/static/@jtree_inf_engine/marginal_family.m, + BNT/inference/static/@jtree_inf_engine/marginal_nodes.m, + BNT/inference/static/@jtree_inf_engine/set_fields.m, + BNT/inference/static/@jtree_inf_engine/Old/collect_evidence.m, + BNT/inference/static/@jtree_inf_engine/Old/distribute_evidence.m, + BNT/inference/static/@jtree_inf_engine/Old/enter_evidence.m, + BNT/inference/static/@jtree_inf_engine/Old/enter_soft_evidence.m, + BNT/inference/static/@jtree_limid_inf_engine/enter_evidence.m, + BNT/inference/static/@jtree_limid_inf_engine/jtree_limid_inf_engine.m, + BNT/inference/static/@jtree_limid_inf_engine/marginal_family.m, + BNT/inference/static/@jtree_limid_inf_engine/marginal_nodes.m, + BNT/inference/static/@jtree_limid_inf_engine/Old/marginal_family.m, + BNT/inference/static/@jtree_limid_inf_engine/Old/marginal_nodes_SS.m, + BNT/inference/static/@jtree_sparse_inf_engine/cliques_from_engine.m, + BNT/inference/static/@jtree_sparse_inf_engine/clq_containing_nodes.m, + BNT/inference/static/@jtree_sparse_inf_engine/collect_evidence.c, + BNT/inference/static/@jtree_sparse_inf_engine/distribute_evidence.c, + BNT/inference/static/@jtree_sparse_inf_engine/enter_evidence.m, + BNT/inference/static/@jtree_sparse_inf_engine/enter_soft_evidence.m, + BNT/inference/static/@jtree_sparse_inf_engine/init_pot.c, + BNT/inference/static/@jtree_sparse_inf_engine/marginal_family.m, + BNT/inference/static/@jtree_sparse_inf_engine/marginal_nodes.m, + BNT/inference/static/@jtree_sparse_inf_engine/set_fields.m, + BNT/inference/static/@jtree_sparse_inf_engine/old/collect_evidence.c, + BNT/inference/static/@jtree_sparse_inf_engine/old/distribute_evidence.c, + BNT/inference/static/@jtree_sparse_inf_engine/old/init_pot.c, + BNT/inference/static/@jtree_sparse_inf_engine/old/init_pot1.c, + BNT/inference/static/@jtree_sparse_inf_engine/old/init_pot1.m, + BNT/inference/static/@likelihood_weighting_inf_engine/enter_evidence.m, + BNT/inference/static/@likelihood_weighting_inf_engine/likelihood_weighting_inf_engine.m, + BNT/inference/static/@likelihood_weighting_inf_engine/marginal_nodes.m, + BNT/inference/static/@pearl_inf_engine/enter_evidence.m, + BNT/inference/static/@pearl_inf_engine/loopy_converged.m, + BNT/inference/static/@pearl_inf_engine/marginal_nodes.m, + BNT/inference/static/@pearl_inf_engine/private/compute_bel.m, + BNT/inference/static/@pearl_inf_engine/private/prod_lambda_msgs.m, + BNT/inference/static/@pearl_inf_engine/private/tree_protocol.m, + BNT/inference/static/@quickscore_inf_engine/enter_evidence.m, + BNT/inference/static/@quickscore_inf_engine/marginal_nodes.m, + BNT/inference/static/@quickscore_inf_engine/quickscore_inf_engine.m, + BNT/inference/static/@quickscore_inf_engine/private/C_quickscore.c, + BNT/inference/static/@quickscore_inf_engine/private/nr.h, + BNT/inference/static/@quickscore_inf_engine/private/nrutil.c, + BNT/inference/static/@quickscore_inf_engine/private/nrutil.h, + BNT/inference/static/@quickscore_inf_engine/private/quickscore.m, + BNT/learning/bayes_update_params.m, + 
BNT/learning/bic_score_family.m, + BNT/learning/compute_cooling_schedule.m, + BNT/learning/dirichlet_score_family.m, + BNT/learning/kpm_learn_struct_mcmc.m, + BNT/learning/learn_params_em.m, + BNT/learning/learn_struct_dbn_reveal.m, + BNT/learning/learn_struct_pdag_ic_star.m, + BNT/learning/mcmc_sample_to_hist.m, BNT/learning/mk_schedule.m, + BNT/learning/mk_tetrad_data_file.m, + BNT/learning/score_dags_old.m, HMM/dhmm_logprob_brute_force.m, + HMM/dhmm_logprob_path.m, HMM/mdp_sample.m, Kalman/AR_to_SS.m, + Kalman/SS_to_AR.m, Kalman/convert_to_lagged_form.m, + Kalman/ensure_AR.m, Kalman/eval_AR_perf.m, + Kalman/kalman_filter.m, Kalman/kalman_smoother.m, + Kalman/kalman_update.m, Kalman/learn_AR.m, + Kalman/learn_AR_diagonal.m, Kalman/learn_kalman.m, + Kalman/smooth_update.m, + BNT/general/convert_dbn_CPDs_to_tables_slow.m, + BNT/general/dispcpt.m, BNT/general/linear_gaussian_to_cpot.m, + BNT/general/partition_matrix_vec_3.m, + BNT/general/shrink_obs_dims_in_gaussian.m, + BNT/general/shrink_obs_dims_in_table.m, + BNT/potentials/CPD_to_pot.m, BNT/potentials/README, + BNT/potentials/check_for_cd_arcs.m, + BNT/potentials/determine_pot_type.m, + BNT/potentials/mk_initial_pot.m, + BNT/potentials/@cgpot/cg_can_to_mom.m, + BNT/potentials/@cgpot/cg_mom_to_can.m, + BNT/potentials/@cgpot/cgpot.m, BNT/potentials/@cgpot/display.m, + BNT/potentials/@cgpot/divide_by_pot.m, + BNT/potentials/@cgpot/domain_pot.m, + BNT/potentials/@cgpot/enter_cts_evidence_pot.m, + BNT/potentials/@cgpot/enter_discrete_evidence_pot.m, + BNT/potentials/@cgpot/marginalize_pot.m, + BNT/potentials/@cgpot/multiply_by_pot.m, + BNT/potentials/@cgpot/multiply_pots.m, + BNT/potentials/@cgpot/normalize_pot.m, + BNT/potentials/@cgpot/pot_to_marginal.m, + BNT/potentials/@cgpot/Old/normalize_pot.m, + BNT/potentials/@cgpot/Old/simple_marginalize_pot.m, + BNT/potentials/@cpot/cpot.m, BNT/potentials/@cpot/cpot_to_mpot.m, + BNT/potentials/@cpot/display.m, + BNT/potentials/@cpot/divide_by_pot.m, + BNT/potentials/@cpot/domain_pot.m, + BNT/potentials/@cpot/enter_cts_evidence_pot.m, + BNT/potentials/@cpot/marginalize_pot.m, + BNT/potentials/@cpot/multiply_by_pot.m, + BNT/potentials/@cpot/multiply_pots.m, + BNT/potentials/@cpot/normalize_pot.m, + BNT/potentials/@cpot/pot_to_marginal.m, + BNT/potentials/@cpot/rescale_pot.m, + BNT/potentials/@cpot/set_domain_pot.m, + BNT/potentials/@cpot/Old/cpot_to_mpot.m, + BNT/potentials/@cpot/Old/normalize_pot.convert.m, + BNT/potentials/@dpot/approxeq_pot.m, + BNT/potentials/@dpot/display.m, + BNT/potentials/@dpot/domain_pot.m, + BNT/potentials/@dpot/dpot_to_table.m, + BNT/potentials/@dpot/get_fields.m, + BNT/potentials/@dpot/multiply_pots.m, + BNT/potentials/@dpot/pot_to_marginal.m, + BNT/potentials/@dpot/set_domain_pot.m, + BNT/potentials/@mpot/display.m, + BNT/potentials/@mpot/marginalize_pot.m, + BNT/potentials/@mpot/mpot.m, BNT/potentials/@mpot/mpot_to_cpot.m, + BNT/potentials/@mpot/normalize_pot.m, + BNT/potentials/@mpot/pot_to_marginal.m, + BNT/potentials/@mpot/rescale_pot.m, + BNT/potentials/@upot/approxeq_pot.m, + BNT/potentials/@upot/display.m, + BNT/potentials/@upot/divide_by_pot.m, + BNT/potentials/@upot/marginalize_pot.m, + BNT/potentials/@upot/multiply_by_pot.m, + BNT/potentials/@upot/normalize_pot.m, + BNT/potentials/@upot/pot_to_marginal.m, + BNT/potentials/@upot/upot.m, + BNT/potentials/@upot/upot_to_opt_policy.m, + BNT/potentials/Old/comp_eff_node_sizes.m, + BNT/potentials/Tables/divide_by_sparse_table.c, + BNT/potentials/Tables/divide_by_table.c, + BNT/potentials/Tables/marg_sparse_table.c, + 
BNT/potentials/Tables/marg_table.c, + BNT/potentials/Tables/mult_by_sparse_table.c, + BNT/potentials/Tables/rep_mult.c, HMM/mk_leftright_transmat.m: + Initial revision + +2002-05-29 04:59 yozhik + + * BNT/inference/static/@stab_cond_gauss_inf_engine/: + clq_containing_nodes.m, problems.txt, push_pot_toclique.m, + Old/initialize_engine.m: Initial import of code base from Kevin + Murphy. + +2002-05-29 04:59 yozhik + + * BNT/inference/static/@stab_cond_gauss_inf_engine/: + clq_containing_nodes.m, problems.txt, push_pot_toclique.m, + Old/initialize_engine.m: Initial revision + +2002-05-19 15:11 yozhik + + * BNT/potentials/: @scgcpot/marginalize_pot.m, + @scgcpot/normalize_pot.m, @scgcpot/rescale_pot.m, + @scgcpot/scgcpot.m, @scgpot/direct_combine_pots.m, + @scgpot/pot_to_marginal.m: Initial import of code base from Kevin + Murphy. + +2002-05-19 15:11 yozhik + + * BNT/potentials/: @scgcpot/marginalize_pot.m, + @scgcpot/normalize_pot.m, @scgcpot/rescale_pot.m, + @scgcpot/scgcpot.m, @scgpot/direct_combine_pots.m, + @scgpot/pot_to_marginal.m: Initial revision + +2001-07-28 08:43 yozhik + + * BNT/potentials/genops.c: Initial import of code base from Kevin + Murphy. + +2001-07-28 08:43 yozhik + + * BNT/potentials/genops.c: Initial revision + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/ChangeLog.txt --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/ChangeLog.txt Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,51 @@ +- 7 May 2010 wsun +* revised \BNT\inference\static\@jtree_inf_engine\jtree_inf_engine.m: It has been shown that the last clique created in the elimination process is not necessarily a strong root for a CLG model. Instead, we use a so-called interface clique, which contains all discrete parents and at least one continuous node from a connected continuous component, as a guaranteed strong root. + +* added one function \graph\findroot.m: this finds the interface clique with the maximum number of discrete parent nodes and uses it as the guaranteed strong root. + +- 16 Feb 2010 mdunham +* Minor bug fixes to make MATLAB 2009 compatible + +- 19 Oct 07 murphyk + +* BNT\CPDs\@noisyor_CPD\CPD_to_CPT.m: the 2nd half of the file was a repeat +of the first half and was deleted (thanks to Karl Kuschner) + +* KPMtools\myismember.m should return a logical for use in "assert", so add this line at the end: + p=logical(p); this prevents "assert" from failing on an integer input. +(thanks to Karl Kuschner) + + + +- 17 Oct 07 murphyk + +* Updated subv2ind and ind2subv in KPMtools to Tom Minka's implementation. +His ind2subv is faster (vectorized), but I had to modify it so it +matched the behavior of my version when called with siz=[]. +His subv2ind is slightly simpler than mine because he does not treat +the siz=[2 2 ... 2] case separately. +Note: there is now no need to ever use the C versions of these +functions (or any others, for that matter). + +* removed BNT/add_BNT_to_path since it is no longer needed. + + + +- 4 Oct 07 murphyk + +* moved code from sourceforge to the UBC website, made version 1.0.4 + +* @pearl_inf_engine/pearl_inf_engine line 24: default +argument for protocol changed from [] to 'parallel'. +Also, changed private/parallel_protocol so it doesn't write to an +empty file id (Matlab 7 issue) + +* added foptions (Matlab 7 issue) + +* changed genpathKPM to exclude svn. Put it in the toplevel directory to +massively simplify the installation process. + + +-------------------------------------- +For changes to the sourceforge version, +culminating in version 1.0.3, see ChangeLog.Sourceforge.txt.
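A note on the 17 Oct 07 entry above: one way to vectorize a linear-index to subscript conversion like ind2subv is to loop over dimensions rather than over individual indices. The following rough sketch only illustrates that idea; the function name and the edge-case handling are assumptions, and this is not the actual KPMtools/Minka code referenced in the changelog.

function sub = ind2subv_sketch(siz, ndx)
% Vectorized linear-index -> subscript conversion (illustrative sketch only,
% NOT the KPMtools/Minka implementation mentioned in the changelog).
ndx = ndx(:) - 1;                     % zero-based working copy; accepts a vector of indices
k   = [1 cumprod(siz(1:end-1))];      % stride (number of elements) per dimension
sub = zeros(length(ndx), length(siz));
for d = length(siz):-1:1              % one pass over dimensions, not over indices
  sub(:,d) = floor(ndx ./ k(d)) + 1;  % 1-based subscript along dimension d
  ndx      = rem(ndx, k(d));          % remainder encodes the lower dimensions
end

For example, ind2subv_sketch([3 4], 5) returns [2 2], which matches MATLAB's built-in ind2sub([3 4], 5).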
diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/GraphViz/CVS/Entries --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/GraphViz/CVS/Entries Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,16 @@ +/README.txt/1.1.1.1/Mon Jun 7 14:33:20 2004// +/approxeq.m/1.1.1.1/Mon Jul 28 22:44:46 2003// +/arrow.m/1.1.1.1/Fri Mar 28 16:35:06 2003// +/dot_to_graph.m/1.1.1.1/Fri Mar 12 23:21:08 2004// +/draw_dbn.m/1.1.1.1/Wed May 29 15:59:54 2002// +/draw_dbn_test.m/1.1.1.1/Sat Jan 18 21:17:18 2003// +/draw_dot.m/1.1.1.1/Wed May 5 13:32:32 2004// +/draw_graph.m/1.1.1.1/Mon Mar 22 22:32:04 2004// +/draw_graph_test.m/1.1.1.1/Mon Jan 20 16:56:02 2003// +/draw_hmm.m/1.1.1.1/Thu Feb 6 03:16:32 2003// +/editGraphGUI.m/1.1.1.1/Tue Jan 27 21:08:48 2004// +/graph_to_dot.m/1.1.1.1/Tue Jun 15 17:50:40 2004// +/make_layout.m/1.1.1.1/Wed May 29 15:59:54 2002// +/my_call.m/1.1.1.1/Tue Jan 27 21:08:38 2004// +/process_options.m/1.1.1.1/Mon Jul 28 22:44:46 2003// +D diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/GraphViz/CVS/Entries.Log --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/GraphViz/CVS/Entries.Log Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +A D/Old//// diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/GraphViz/CVS/Repository --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/GraphViz/CVS/Repository Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +FullBNT/GraphViz diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/GraphViz/CVS/Root --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/GraphViz/CVS/Root Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/GraphViz/Old/CVS/Entries --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/GraphViz/Old/CVS/Entries Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,6 @@ +/dot_to_graph.m/1.1.1.1/Tue Jan 27 21:01:54 2004// +/draw_dot.m/1.1.1.1/Tue Jan 27 20:42:50 2004// +/draw_graph.m/1.1.1.1/Tue Jan 27 21:03:56 2004// +/graphToDot.m/1.1.1.1/Tue Feb 3 17:15:18 2004// +/pre_pesha_graph_to_dot.m/1.1.1.1/Tue Jan 27 20:47:40 2004// +D diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/GraphViz/Old/CVS/Repository --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/GraphViz/Old/CVS/Repository Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +FullBNT/GraphViz/Old diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/GraphViz/Old/CVS/Root --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/GraphViz/Old/CVS/Root Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/GraphViz/Old/dot_to_graph.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/GraphViz/Old/dot_to_graph.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,107 @@ +function [Adj, labels, x, y] = dot_to_graph(filename) + +% [Adj, labels, x, y] = dot_to_graph(filename) +% Extract a matrix representation, node labels, and node position coordinates +% from a file in GraphViz format http://www.research.att.com/sw/tools/graphviz +% +% INPUTS: +% 'filename' - the file in DOT format containing the graph layout. 
+% OUTPUT: +% 'Adj' - an adjacency matrix representation of the graph in 'filename'; +% 'labels' - a character array with the names of the nodes of the graph; +% 'x' - a row vector with the x-coordinates of the nodes in 'filename'; +% 'y' - a row vector with the y-coordinates of the nodes in 'filename'. +% +% WARNINGS: not guaranteed to parse ANY GraphViz file. Debugged on undirected +% sample graphs from GraphViz (Heawood, Petersen, ER, ngk10_4, process). +% Complains about RecursionLimit being set to only 500 on huge graphs. +% Ignores singletons (disjoint nodes). +% Sample DOT code "ABC.dot", read by [Adj, labels, x, y] = dot_to_graph('ABC.dot') +% digraph G { +% A [pos="28,31"]; +% B [pos="74,87"]; +% A -- B [pos="e,61,71 41,47 46,53 50,58 55,64"]; +% } +% last modified: Jan 2004 +% by Alexi Savov: asavov @wustl.edu | http://artsci.wustl.edu/~azsavov +% Leon Peshkin: pesha @ai.mit.edu | http://www.ai.mit.edu/~pesha + +if ~exist(filename) % Checks whether the specified file exists. + error('* * * File does not exist or could not be found. * * *'); return; +end; + +lines = textread(filename,'%s','delimiter','\n','commentstyle','c'); % Read file into cell array +dot_lines = strvcat(lines); % of lines, ignoring C-style comments + +if findstr(dot_lines(1,:), 'graph ') == [] % Is this a DOT file ? + error('* * * File does not appear to be in valid DOT format. * * *'); return; +end; + +Nlns = size(dot_lines,1); % The number of lines; +labels = {}; +unread = 1:Nlns; % 'unread' list of lines which have not been examined yet +edge_id = 1; +for line_ndx = 1:Nlns % This section sets the adjacency matrix A(Lnode,Rnode) = edge_id. + line = dot_lines(line_ndx,:); + Ddash_pos = strfind(line, ' -- ') + 1; % double dash positions + arrow_pos = strfind(line, ' -> ') + 1; % arrow dash positions + tokens = strread(line,'%s','delimiter',' "'); + left_bound = 1; + for dash_pos = [Ddash_pos arrow_pos]; % if empty - not a POS line + Lnode = sscanf(line(left_bound:dash_pos -2), '%s'); + Rnode = sscanf(line(dash_pos +3 : length(line)-1),'%s',1); + Lndx = strmatch(Lnode, labels, 'exact'); + Rndx = strmatch(Rnode, labels, 'exact'); + if isempty(Lndx) % extend our list of labels + labels{end+1} = Lnode; + Lndx = length(labels); + end + if isempty(Rndx) + labels{end+1} = Rnode; + Rndx = length(labels); + end + Adj(Lndx, Rndx) = edge_id; + if ismember(dash_pos, Ddash_pos) % The edge is undirected, so Adj(Rndx,Lndx) is also set + Adj(Rndx, Lndx) = edge_id; + end + edge_id = edge_id + 1; + left_bound = dash_pos + 3; + unread = setdiff(unread, line_ndx); + end +end +Nvrt = length(labels); % number of vertices we found [Do we ever have singleton vertices ???] +% labels = strvcat(labels); % convert to the searchable array +x = zeros(1, Nvrt); +y = zeros(1, Nvrt); +lst_node = 0; + % Find node's position coordinates if they are contained in 'filename'. +for line_ndx = unread % Look for node's coordinates among the 'unread' lines.
+ line = dot_lines(line_ndx,:); + bra_pos = strfind(line, '['); % has to have "[" if it has the lable + pos_pos = strfind(line, 'pos'); % position of the "pos" + for node = 1:Nvrt % look through the list of labels + % THE NEXT STATEMENT we assume no label is substring of any other label + lbl_pos = strfind(line, labels{node}); + if (~isempty(lbl_pos) & ~isempty(bra_pos) & (x(node) == 0)) % make sure we have not seen it + if (lbl_pos(1) < bra_pos(1)) % label has to be to the left of braket + lst_node = node; + end + end + end + if (~isempty(pos_pos) & lst_node) % this line contains SOME position + [node_pos] = sscanf(line(pos_pos:length(line)), ' pos = "%d,%d"')'; + x(lst_node) = node_pos(1); + y(lst_node) = node_pos(2); + lst_node = 0; % not to assign position several times + end +end + +if (isempty(find(x)) & (nargout > 2)) % If coordinates were requested, but not found in 'filename'. + warning('File does not contain node coordinates.'); +end; +if ~(size(Adj,1)==size(Adj,2)) % Make sure Adj is a square matrix. ? + Adj = eye(max(size(Adj)),size(Adj,1))*Adj*eye(size(Adj,2),max(size(Adj))); +end; +x = .9*(x-min(x))/range(x)+.05; % normalise and push off margins +y = .9*(y-min(y))/range(y)+.05; + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/GraphViz/Old/draw_dot.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/GraphViz/Old/draw_dot.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,26 @@ +function draw_dot(adj); +% +% draw_dot(name) +% +% Sample code illustrating use of dot_to_graph.m function +% Leon Peshkin +if ispc, shell = 'dos'; else, shell = 'unix'; end % Which OS ? + +cmdline = strcat(shell,'(''neato -V'')'); +status = eval(cmdline); +[status, result] = dos('neato -V'); % request version to check NEATO +if status == 1, fprintf('Complaining \n'); exit, end + +tmpDOTfile = '_GtDout.dot'; % to be platform independant no use of directories +tmpLAYOUT = '_LAYout.dot'; +directed = 0; % assume UN-directed graph +graph_to_dot(adj > 0, 'directed', directed, 'filename', tmpDOTfile); % save in file + +cmdline = strcat([shell '(''neato -Tdot ' tmpDOTfile ' -o ' tmpLAYOUT ''')']); % preserve trailing spaces +status = eval(cmdline); % get NEATO todo layout + +[adj, labels, x, y] = dot_to_graph(tmpLAYOUT); % load layout +delete(tmpLAYOUT); delete(tmpDOTfile); % clean up temporary files + +figure(1); clf; axis square % now plot +[x, y, h] = draw_graph(adj>0, labels, zeros(size(x,2),1), x, y); \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/GraphViz/Old/draw_graph.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/GraphViz/Old/draw_graph.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,310 @@ +function [x, y, h] = draw_graph(adj, labels, node_t, x, y) +% DRAW_LAYOUT Draws a layout for a graph +% +% [] = DRAW_LAYOUT(ADJ, ) +% +% Inputs : +% ADJ : Adjacency matrix (source, sink) +% LABELS : Cell array containing labels +% ISBOX : 1 if node is a box, 0 if oval +% X, Y, : Coordinates of nodes on the unit square +% +% Outputs : +% X, Y : Coordinates of nodes on the unit square +% H : Object handles +% +% Usage Example : [x, y] = draw_layout([0 1;0 0], {'Hidden','Visible'}, [1 0]'); +% +% h(i,1) is the text handle - color +% h(i,2) is the circle handle - facecolor +% +% Note : +% See also MAKE_LAYOUT + +% Uses : + +% Change History : +% Date Time Prog Note +% 13-Apr-2000 9:06 PM ATC Created under MATLAB 5.3.1.29215a (R11.1) + +% ATC = Ali Taylan Cemgil, +% SNN - University of Nijmegen, Department of Medical 
Physics and Biophysics +% e-mail : cemgil@mbfys.kun.nl +adj = double(adj); +N = size(adj,1); +if nargin<2, +% labels = cellstr(char(zeros(N,1)+double('+'))); + labels = cellstr(int2str((1:N)')); +end; + +if nargin<3, + node_t = zeros(N,1); +% node_t = rand(N,1) > 0.5; +else + node_t = node_t(:); +end; + +axis([0 1 0 1]); +set(gca,'XTick',[],'YTick',[],'box','on'); +% axis('square'); +%colormap(flipud(gray)); + +if nargin<4, + [x y] = make_layout(adj); +end; + +idx1 = find(node_t==0); wd1=[]; +if ~isempty(idx1), +[h1 wd1] = textoval(x(idx1), y(idx1), labels(idx1)); +end; + +idx2 = find(node_t~=0); wd2 = []; +if ~isempty(idx2), +[h2 wd2] = textbox(x(idx2), y(idx2), labels(idx2)); +end; + +wd = zeros(size(wd1,1)+size(wd2,1),2); +if ~isempty(idx1), wd(idx1, :) = wd1; end; +if ~isempty(idx2), wd(idx2, :) = wd2; end; + +for i=1:N, + j = find(adj(i,:)==1); + for k=j, + if x(k)-x(i)==0, + sign = 1; + if y(i)>y(k), alpha = -pi/2; else alpha = pi/2; end; + else + alpha = atan((y(k)-y(i))/(x(k)-x(i))); + if x(i)2, + h = zeros(length(wd),2); + if ~isempty(idx1), + h(idx1,:) = h1; + end; + if ~isempty(idx2), + h(idx2,:) = h2; + end; +end; + +%%%%% + +function [t, wd] = textoval(x, y, str) +% TEXTOVAL Draws an oval around text objects +% +% [T, WIDTH] = TEXTOVAL(X, Y, STR) +% [..] = TEXTOVAL(STR) % Interactive +% +% Inputs : +% X, Y : Coordinates +% TXT : Strings +% +% Outputs : +% T : Object Handles +% WIDTH : x and y Width of ovals +% +% Usage Example : [t] = textoval('Visit to Asia?'); +% +% +% Note : +% See also TEXTBOX + +% Uses : + +% Change History : +% Date Time Prog Note +% 15-Jun-1998 10:36 AM ATC Created under MATLAB 5.1.0.421 + +% ATC = Ali Taylan Cemgil, +% SNN - University of Nijmegen, Department of Medical Physics and Biophysics +% e-mail : cemgil@mbfys.kun.nl + +temp = []; + +switch nargin, + case 1, + str = x; + if ~isa(str,'cell') str=cellstr(str); end; + N = length(str); + wd = zeros(N,2); + for i=1:N, + [x, y] = ginput(1); + tx = text(x,y,str{i},'HorizontalAlignment','center','VerticalAlign','middle'); + [ptc wx wy] = draw_oval(tx, x, y); + wd(i,:) = [wx wy]; + delete(tx); + tx = text(x,y,str{i},'HorizontalAlignment','center','VerticalAlign','middle'); + temp = [temp ; tx ptc]; + end; + case 3, + if ~isa(str,'cell') str=cellstr(str); end; + N = length(str); + wd = zeros(N,2); + for i=1:N, + tx = text(x(i),y(i),str{i},'HorizontalAlignment','center','VerticalAlign','middle'); + [ptc wx wy] = draw_oval(tx, x(i), y(i)); + wd(i,:) = [wx wy]; + delete(tx); + tx = text(x(i),y(i),str{i},'HorizontalAlignment','center','VerticalAlign','middle'); + temp = [temp; tx ptc]; + end; + otherwise, +end; + +if nargout>0, t = temp; end; + +%%%%%%%%% + + +function [ptc, wx, wy] = draw_oval(tx, x, y) +% Draws an oval box around a tex object +sz = get(tx,'Extent'); +wy = sz(4); +wx = max(2/3*sz(3), wy); +wx = 0.5*wx; % KPM +wy = 0.5*wy; +ptc = ellipse(x, y, wx, wy); +set(ptc, 'FaceColor','w'); + + +%%%%%%%%%%%%% + +function [p] = ellipse(x, y, rx, ry, c) +% ELLIPSE Draws Ellipse shaped patch objects +% +% [
P
] = ELLIPSE(X, Y, Rx, Ry, C) +% +% Inputs : +% X : N x 1 vector of x coordinates +% Y : N x 1 vector of y coordinates +% Rx, Ry : Radii +% C : Color index +% +% +% Outputs : +% P = Handles of Ellipse shaped path objects +% +% Usage Example : [] = ellipse(); +% +% +% Note : +% See also + +% Uses : + +% Change History : +% Date Time Prog Note +% 27-May-1998 9:55 AM ATC Created under MATLAB 5.1.0.421 + +% ATC = Ali Taylan Cemgil, +% SNN - University of Nijmegen, Department of Medical Physics and Biophysics +% e-mail : cemgil@mbfys.kun.nl + +if (nargin < 2) error('Usage Example : e = ellipse([0 1],[0 -1],[1 0.5],[2 0.5]); '); end; +if (nargin < 3) rx = 0.1; end; +if (nargin < 4) ry = rx; end; +if (nargin < 5) c = 1; end; + +if length(c)==1, c = ones(size(x)).*c; end; +if length(rx)==1, rx = ones(size(x)).*rx; end; +if length(ry)==1, ry = ones(size(x)).*ry; end; + +n = length(x); +p = zeros(size(x)); +t = 0:pi/30:2*pi; +for i=1:n, + px = rx(i)*cos(t)+x(i); + py = ry(i)*sin(t)+y(i); + p(i) = patch(px,py,c(i)); +end; + +if nargout>0, pp = p; end; + +%%%%% + +function [t, wd] = textbox(x,y,str) +% TEXTBOX Draws A Box around the text +% +% [T, WIDTH] = TEXTBOX(X, Y, STR) +% [..] = TEXTBOX(STR) +% +% Inputs : +% X, Y : Coordinates +% TXT : Strings +% +% Outputs : +% T : Object Handles +% WIDTH : x and y Width of boxes +%% +% Usage Example : t = textbox({'Ali','Veli','49','50'}); +% +% +% Note : +% See also TEXTOVAL + +% Uses : + +% Change History : +% Date Time Prog Note +% 09-Jun-1998 11:43 AM ATC Created under MATLAB 5.1.0.421 + +% ATC = Ali Taylan Cemgil, +% SNN - University of Nijmegen, Department of Medical Physics and Biophysics +% e-mail : cemgil@mbfys.kun.nl + +% See +temp = []; + +switch nargin, + case 1, + str = x; + if ~isa(str,'cell') str=cellstr(str); end; + N = length(str); + wd = zeros(N,2); + for i=1:N, + [x, y] = ginput(1); + tx = text(x,y,str{i},'HorizontalAlignment','center','VerticalAlign','middle'); + [ptc wx wy] = draw_box(tx, x, y); + wd(i,:) = [wx wy]; + delete(tx); + tx = text(x,y,str{i},'HorizontalAlignment','center','VerticalAlign','middle'); + temp = [temp; tx ptc]; + end; + case 3, + if ~isa(str,'cell') str=cellstr(str); end; + N = length(str); + for i=1:N, + tx = text(x(i),y(i),str{i},'HorizontalAlignment','center','VerticalAlign','middle'); + [ptc wx wy] = draw_box(tx, x(i), y(i)); + wd(i,:) = [wx wy]; + delete(tx); + tx = text(x(i),y(i),str{i},'HorizontalAlignment','center','VerticalAlign','middle'); + temp = [temp; tx ptc]; + end; + + otherwise, + +end; + +if nargout>0, t = temp; end; + + +function [ptc, wx, wy] = draw_box(tx, x, y) +% Draws a box around a tex object + sz = get(tx,'Extent'); + wy = 2/3*sz(4); + wx = max(2/3*sz(3), wy); + ptc = patch([x-wx x+wx x+wx x-wx], [y+wy y+wy y-wy y-wy],'w'); + set(ptc, 'FaceColor','w'); + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/GraphViz/Old/graphToDot.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/GraphViz/Old/graphToDot.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,84 @@ +function graphToDot(adj, varargin) +% GRAPHTODOT Makes a GraphViz (AT&T) ile representing an adjacency matrix +% function graphToDot(adj, ...) 
+% Optional arguments should be passed as name/value pairs [default] +% +% 'filename' - if omitted, writes to 'tmp.dot' +% 'arc_label' - arc_label{i,j} is a string attached to the i-j arc [""] +% 'node_label' - node_label{i} is a string attached to the node i ["i"] +% 'width' - width in inches [10] +% 'height' - height in inches [10] +% 'leftright' - 1 means layout left-to-right, 0 means top-to-bottom [0] +% 'directed' - 1 means use directed arcs, 0 means undirected [1] +% +% For details on graphviz, See http://www.research.att.com/sw/tools/graphviz +% +% See also dot_to_graph and draw_dot +% +% First version written by Kevin Murphy 2002. +% Modified by Leon Peshkin, Jan 2004. + +node_label = []; arc_label = []; % set default args +width = 10; height = 10; +leftright = 0; directed = 1; filename = 'tmp.dot'; + +for i = 1:2:nargin-1 % get optional args + switch varargin{i} + case 'filename', filename = varargin{i+1}; + case 'node_label', node_label = varargin{i+1}; + case 'arc_label', arc_label = varargin{i+1}; + case 'width', width = varargin{i+1}; + case 'height', height = varargin{i+1}; + case 'leftright', leftright = varargin{i+1}; + case 'directed', directed = varargin{i+1}; + end +end + +fid = fopen(filename, 'w'); +if directed + fprintf(fid, 'digraph G {\n'); + arctxt = '->'; + if isempty(arc_label) + labeltxt = ''; + else + labeltxt = '[label="%s"]'; + end +else + fprintf(fid, 'graph G {\n'); + arctxt = '--'; + if isempty(arc_label) + labeltxt = '[dir=none]'; + else + labeltext = '[label="%s",dir=none]'; + end +end +edgeformat = strcat(['%d ',arctxt,' %d ',labeltxt,';\n']); +fprintf(fid, 'center = 1;\n'); +fprintf(fid, 'size=\"%d,%d\";\n', width, height); +if leftright + fprintf(fid, 'rankdir=LR;\n'); +end +Nnds = length(adj); +for node = 1:Nnds % process nodes + if isempty(node_label) + fprintf(fid, '%d;\n', node); + else + fprintf(fid, '%d [ label = "%s" ];\n', node, +node_label{node}); + end +end +for node1 = 1:Nnds % process edges + if directed + arcs = find(adj(node1,:)); % children(adj, node); + else + arcs = find(adj(node1,node1+1:Nnds)); % remove duplicate arcs + end + for node2 = arcs + fprintf(fid, edgeformat, node1, node2); + end +end +fprintf(fid, '}'); +fclose(fid); + + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/GraphViz/Old/pre_pesha_graph_to_dot.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/GraphViz/Old/pre_pesha_graph_to_dot.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,166 @@ +function graph_to_dot(G, varargin) +% DAG_TO_DOT Make a file representing the directed graph in dotty format. +% dag_to_dot(G, ...) +% +% Optional arguments should be passed as name/value pairs [default] +% +% 'filename' - if omitted, we write to 'tmp.dot', convert this to 'tmp.ps', +% and then call ghostview automatically +% 'arc_label' - arc_label{i,j} is a string attached to the i->j arc. [""] +% 'node_label' - node_label{i} is a string attached to node i. 
["i"] +% 'width' - width in inches [10] +% 'height' - height in inches [10] +% 'leftright' - 1 means layout left-to-right, 0 means top-to-bottom [0] +% 'directed' - 1 means use directed arcs, 0 means undirected [1] +% +% For details on dotty, See http://www.research.att.com/sw/tools/graphviz +% +% Example: +% G = rand(5,5); +% names = cell(5,5); +% names{1,2} = 'arc 1-2'; +% graph_to_dot(G, 'arc_label', names) +% or graph_to_dot(G, 'arc_label', 'numbers') % prints value of G(i,j) on i->j arc + +% Kevin Murphy, 1998 + +% set default args +filename = []; +node_label = []; +arc_label = []; +width = 10; +height = 10; +leftright = 0; +directed = 1; +% get optional args +args = varargin; +for i=1:2:length(args) + switch args{i} + case 'filename', filename = args{i+1}; + case 'node_label', node_label = args{i+1}; + case 'arc_label', arc_label = args{i+1}; + case 'width', width = args{i+1}; + case 'height', height = args{i+1}; + case 'leftright', leftright = args{i+1}; + case 'directed', directed = args{i+1}; + end +end + +if isstr(arc_label) & strcmp(arc_label, 'numbers') + N = length(G); + arc_label = cell(N,N); + for i=1:N + for j=1:N + arc_label{i,j} = sprintf('%4.2f', G(i,j)); + end + end +end + +if isempty(filename) + make_file(G, 'tmp.dot', node_label, arc_label, width, height, leftright, directed); + if isunix + !dot -Tps tmp.dot -o tmp.ps + + !gs tmp.ps & + else + dos('dot -Tps tmp.dot -o tmp.ps'); + dos('gsview32 tmp.ps &'); + end +else + + + make_file(G, filename, node_label, arc_label, width, height, leftright, directed); +end + + +%%%%%% + +function make_file(G, filename, node_label, arc_label, width, height, leftright, directed) + +n = length(G); +fid = fopen(filename, 'w'); +if directed + fprintf(fid, 'digraph G {\n'); +else + fprintf(fid, 'graph G {\n'); +end +fprintf(fid, 'center = 1;\n'); +fprintf(fid, 'size=\"%d,%d\";\n', width, height); +if leftright + fprintf(fid, 'rankdir=LR;\n'); +end +for i=1:n + if isempty(node_label) + fprintf(fid, '%d;\n', i); + else + fprintf(fid, '%d [ label = "%s" ];\n', i, node_label{i}); + end +end +if directed + for i=1:n + cs = children(G,i); + for j=1:length(cs) + c = cs(j); + if isempty(arc_label) + fprintf(fid, '%d -> %d;\n', i, c); + else + fprintf(fid, '%d -> %d [label="%s"];\n', i, c, arc_label{i,c}); + end + end + end +else + for i=1:n + ns = intersect(neighbors(G,i), i+1:n); % remove duplicate arcs + for j=1:length(ns) + c = ns(j); + if isempty(arc_label) + fprintf(fid, '%d -- %d [dir=none];\n', i, c); + else + fprintf(fid, '%d -- %d [label="%s",dir=none];\n', i, c, arc_label{i,c}); + end + end + end +end +fprintf(fid, '\n}'); +fclose(fid); + + + +%%%%%%%%%%%%%%% + +function cs = children(adj_mat, i, t) +% CHILDREN Return the indices of a node's children in sorted order +% c = children(adj_mat, i, t) +% +% t is an optional argument: if present, dag is assumed to be a 2-slice DBN + +if nargin < 3 + cs = find(adj_mat(i,:)); +else + if t==1 + cs = find(adj_mat(i,:)); + else + ss = length(adj_mat)/2; + j = i+ss; + cs = find(adj_mat(j,:)) + (t-2)*ss; + end +end + +%%%%%%%%%%%% + +function ps = parents(adj_mat, i) +% PARENTS Return the list of parents of node i +% ps = parents(adj_mat, i) + +ps = find(adj_mat(:,i))'; + +%%%%%%%%%%%%% + +function ns = neighbors(adj_mat, i) +% NEIGHBORS Find the parents and children of a node in a graph. 
+% ns = neighbors(adj_mat, i) + +ns = union(children(adj_mat, i), parents(adj_mat, i)); + + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/GraphViz/README.txt --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/GraphViz/README.txt Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,16 @@ +Graph visualization (automatic layout) functions +------------------------------------------------ + +This directory contains code to automatically layout and visualize +graphs. It provides a matlab interface to the graphviz program: + http://www.research.att.com/sw/tools/graphviz +Written by Kevin Murphy, Leon Peshkin, Tom Minka. + +draw_graph was written by Ali Taylan Cemgil, and is entirely +self-contained matlab: it does not need graphviz, but produces lower +quality results. +http://www.mbfys.kun.nl/~cemgil/matlab/layout.html + +See also the following URLs for other graph layout programs: + http://www.ics.uci.edu/~eppstein/gina/gdraw.html + http://www.cwi.nl/InfoVisu/ diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/GraphViz/approxeq.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/GraphViz/approxeq.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,21 @@ +function p = approxeq(a, b, tol, rel) +% APPROXEQ Are a and b approximately equal (to within a specified tolerance)? +% p = approxeq(a, b, thresh) +% 'tol' defaults to 1e-3. +% p(i) = 1 iff abs(a(i) - b(i)) < thresh +% +% p = approxeq(a, b, thresh, 1) +% p(i) = 1 iff abs(a(i)-b(i))/abs(a(i)) < thresh + +if nargin < 3, tol = 1e-2; end +if nargin < 4, rel = 0; end + +a = a(:); +b = b(:); +d = abs(a-b); +if rel + p = ~any( (d ./ (abs(a)+eps)) > tol); +else + p = ~any(d > tol); +end + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/GraphViz/arrow.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/GraphViz/arrow.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1333 @@ +function [h,yy,zz] = arrow(varargin) +% ARROW Draw a line with an arrowhead. +% +% ARROW(Start,Stop) draws a line with an arrow from Start to Stop (points +% should be vectors of length 2 or 3, or matrices with 2 or 3 +% columns), and returns the graphics handle of the arrow(s). +% +% ARROW uses the mouse (click-drag) to create an arrow. +% +% ARROW DEMO & ARROW DEMO2 show 3-D & 2-D demos of the capabilities of ARROW. +% +% ARROW may be called with a normal argument list or a property-based list. +% ARROW(Start,Stop,Length,BaseAngle,TipAngle,Width,Page,CrossDir) is +% the full normal argument list, where all but the Start and Stop +% points are optional. If you need to specify a later argument (e.g., +% Page) but want default values of earlier ones (e.g., TipAngle), +% pass an empty matrix for the earlier ones (e.g., TipAngle=[]). +% +% ARROW('Property1',PropVal1,'Property2',PropVal2,...) creates arrows with the +% given properties, using default values for any unspecified or given as +% 'default' or NaN. Some properties used for line and patch objects are +% used in a modified fashion, others are passed directly to LINE, PATCH, +% or SET. For a detailed properties explanation, call ARROW PROPERTIES. +% +% Start The starting points. B +% Stop The end points. /|\ ^ +% Length Length of the arrowhead in pixels. /|||\ | +% BaseAngle Base angle in degrees (ADE). //|||\\ L| +% TipAngle Tip angle in degrees (ABC). ///|||\\\ e| +% Width Width of the base in pixels. ////|||\\\\ n| +% Page Use hardcopy proportions. /////|D|\\\\\ g| +% CrossDir Vector || to arrowhead plane. 
//// ||| \\\\ t| +% NormalDir Vector out of arrowhead plane. /// ||| \\\ h| +% Ends Which end has an arrowhead. //<----->|| \\ | +% ObjectHandles Vector of handles to update. / base ||| \ V +% E angle||<-------->C +% ARROW(H,'Prop1',PropVal1,...), where H is a |||tipangle +% vector of handles to previously-created arrows ||| +% and/or line objects, will update the previously- ||| +% created arrows according to the current view -->|A|<-- width +% and any specified properties, and will convert +% two-point line objects to corresponding arrows. ARROW(H) will update +% the arrows if the current view has changed. Root, figure, or axes +% handles included in H are replaced by all descendant Arrow objects. +% +% A property list can follow any specified normal argument list, e.g., +% ARROW([1 2 3],[0 0 0],36,'BaseAngle',60) creates an arrow from (1,2,3) to +% the origin, with an arrowhead of length 36 pixels and 60-degree base angle. +% +% The basic arguments or properties can generally be vectorized to create +% multiple arrows with the same call. This is done by passing a property +% with one row per arrow, or, if all arrows are to have the same property +% value, just one row may be specified. +% +% You may want to execute AXIS(AXIS) before calling ARROW so it doesn't change +% the axes on you; ARROW determines the sizes of arrow components BEFORE the +% arrow is plotted, so if ARROW changes axis limits, arrows may be malformed. +% +% This version of ARROW uses features of MATLAB 5 and is incompatible with +% earlier MATLAB versions (ARROW for MATLAB 4.2c is available separately); +% some problems with perspective plots still exist. + +% Copyright (c)1995-1997, Erik A. Johnson , 8/14/97 + +% Revision history: +% 8/14/97 EAJ Added workaround for MATLAB 5.1 scalar logical transpose bug. +% 7/21/97 EAJ Fixed a few misc bugs. +% 7/14/97 EAJ Make arrow([],'Prop',...) do nothing (no old handles) +% 6/23/97 EAJ MATLAB 5 compatible version, release. +% 5/27/97 EAJ Added Line Arrows back in. Corrected a few bugs. +% 5/26/97 EAJ Changed missing Start/Stop to mouse-selected arrows. +% 5/19/97 EAJ MATLAB 5 compatible version, beta. +% 4/13/97 EAJ MATLAB 5 compatible version, alpha. +% 1/31/97 EAJ Fixed bug with multiple arrows and unspecified Z coords. +% 12/05/96 EAJ Fixed one more bug with log plots and NormalDir specified +% 10/24/96 EAJ Fixed bug with log plots and NormalDir specified +% 11/13/95 EAJ Corrected handling for 'reverse' axis directions +% 10/06/95 EAJ Corrected occasional conflict with SUBPLOT +% 4/24/95 EAJ A major rewrite. +% Fall 94 EAJ Original code. + +% Things to be done: +% - segment parsing, computing, and plotting into separate subfunctions +% - change computing from Xform to Camera paradigms +% + this will help especially with 3-D perspective plots +% + if the WarpToFill section works right, remove warning code +% + when perpsective works properly, remove perspective warning code +% - add cell property values and struct property name/values (like get/set) +% - get rid of NaN as the "default" data label +% + perhaps change userdata to a struct and don't include (or leave +% empty) the values specified as default; or use a cell containing +% an empty matrix for a default value +% - add functionality of GET to retrieve current values of ARROW properties + +% Many thanks to Keith Rogers for his many excellent +% suggestions and beta testing. Check out his shareware package MATDRAW. +% He has permission to distribute ARROW with MATDRAW. 
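% A minimal usage sketch, assuming a standard MATLAB figure window (this
% illustrative snippet is not part of the ARROW distribution itself): it uses
% the Start/Stop plus property-pair form documented above, and freezes the
% axis limits first as the header text recommends.
%   figure; axis([0 1 0 1]); axis(axis);            % fix limits before drawing
%   h = arrow([0.1 0.1], [0.8 0.8], 'Length', 16, 'BaseAngle', 60);
%   set(h, 'FaceColor', 'b', 'EdgeColor', 'b');     % returned handle is a patch by default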
+ +% global variable initialization +global ARROW_PERSP_WARN ARROW_STRETCH_WARN ARROW_AXLIMITS +if isempty(ARROW_PERSP_WARN ), ARROW_PERSP_WARN =1; end; +if isempty(ARROW_STRETCH_WARN), ARROW_STRETCH_WARN=1; end; + +% Handle callbacks +if (nargin>0 & isstr(varargin{1}) & strcmp(lower(varargin{1}),'callback')), + arrow_callback(varargin{2:end}); return; +end; + +% Are we doing the demo? +c = sprintf('\n'); +if (nargin==1 & isstr(varargin{1})), + arg1 = lower(varargin{1}); + if strncmp(arg1,'prop',4), arrow_props; + elseif strncmp(arg1,'demo',4) + clf reset + demo_info = arrow_demo; + if ~strncmp(arg1,'demo2',5), + hh=arrow_demo3(demo_info); + else, + hh=arrow_demo2(demo_info); + end; + if (nargout>=1), h=hh; end; + elseif strncmp(arg1,'fixlimits',3), + arrow_fixlimits(ARROW_AXLIMITS); + ARROW_AXLIMITS=[]; + elseif strncmp(arg1,'help',4), + disp(help(mfilename)); + else, + error([upper(mfilename) ' got an unknown single-argument string ''' deblank(arg1) '''.']); + end; + return; +end; + +% Check # of arguments +if (nargout>3), error([upper(mfilename) ' produces at most 3 output arguments.']); end; + +% find first property number +firstprop = nargin+1; +for k=1:length(varargin), if ~isnumeric(varargin{k}), firstprop=k; break; end; end; +lastnumeric = firstprop-1; + +% check property list +if (firstprop<=nargin), + for k=firstprop:2:nargin, + curarg = varargin{k}; + if ~isstr(curarg) | sum(size(curarg)>1)>1, + error([upper(mfilename) ' requires that a property name be a single string.']); + end; + end; + if (rem(nargin-firstprop,2)~=1), + error([upper(mfilename) ' requires that the property ''' ... + varargin{nargin} ''' be paired with a property value.']); + end; +end; + +% default output +if (nargout>0), h=[]; end; +if (nargout>1), yy=[]; end; +if (nargout>2), zz=[]; end; + +% set values to empty matrices +start = []; +stop = []; +len = []; +baseangle = []; +tipangle = []; +wid = []; +page = []; +crossdir = []; +ends = []; +ax = []; +oldh = []; +ispatch = []; +defstart = [NaN NaN NaN]; +defstop = [NaN NaN NaN]; +deflen = 16; +defbaseangle = 90; +deftipangle = 16; +defwid = 0; +defpage = 0; +defcrossdir = [NaN NaN NaN]; +defends = 1; +defoldh = []; +defispatch = 1; + +% The 'Tag' we'll put on our arrows +ArrowTag = 'Arrow'; + +% check for oldstyle arguments +if (firstprop==2), + % assume arg1 is a set of handles + oldh = varargin{1}(:); + if isempty(oldh), return; end; +elseif (firstprop>9), + error([upper(mfilename) ' takes at most 8 non-property arguments.']); +elseif (firstprop>2), + s = str2mat('start','stop','len','baseangle','tipangle','wid','page','crossdir'); + for k=1:firstprop-1, eval([deblank(s(k,:)) '=varargin{k};']); end; +end; + +% parse property pairs +extraprops={}; +for k=firstprop:2:nargin, + prop = varargin{k}; + val = varargin{k+1}; + prop = [lower(prop(:)') ' ']; + if strncmp(prop,'start' ,5), start = val; + elseif strncmp(prop,'stop' ,4), stop = val; + elseif strncmp(prop,'len' ,3), len = val(:); + elseif strncmp(prop,'base' ,4), baseangle = val(:); + elseif strncmp(prop,'tip' ,3), tipangle = val(:); + elseif strncmp(prop,'wid' ,3), wid = val(:); + elseif strncmp(prop,'page' ,4), page = val; + elseif strncmp(prop,'cross' ,5), crossdir = val; + elseif strncmp(prop,'norm' ,4), if (isstr(val)), crossdir=val; else, crossdir=val*sqrt(-1); end; + elseif strncmp(prop,'end' ,3), ends = val; + elseif strncmp(prop,'object',6), oldh = val(:); + elseif strncmp(prop,'handle',6), oldh = val(:); + elseif strncmp(prop,'type' ,4), ispatch = val; + elseif strncmp(prop,'userd' ,5), %ignore 
it + else, + % make sure it is a valid patch or line property + eval('get(0,[''DefaultPatch'' varargin{k}]);err=0;','err=1;'); errstr=lasterr; + if (err), eval('get(0,[''DefaultLine'' varargin{k}]);err=0;','err=1;'); end; + if (err), + errstr(1:max(find(errstr==setstr(13)|errstr==setstr(10)))) = ''; + error([upper(mfilename) ' got ' errstr]); + end; + extraprops={extraprops{:},varargin{k},val}; + end; +end; + +% Check if we got 'default' values +start = arrow_defcheck(start ,defstart ,'Start' ); +stop = arrow_defcheck(stop ,defstop ,'Stop' ); +len = arrow_defcheck(len ,deflen ,'Length' ); +baseangle = arrow_defcheck(baseangle,defbaseangle,'BaseAngle' ); +tipangle = arrow_defcheck(tipangle ,deftipangle ,'TipAngle' ); +wid = arrow_defcheck(wid ,defwid ,'Width' ); +crossdir = arrow_defcheck(crossdir ,defcrossdir ,'CrossDir' ); +page = arrow_defcheck(page ,defpage ,'Page' ); +ends = arrow_defcheck(ends ,defends ,'' ); +oldh = arrow_defcheck(oldh ,[] ,'ObjectHandles'); +ispatch = arrow_defcheck(ispatch ,defispatch ,'' ); + +% check transpose on arguments +[m,n]=size(start ); if any(m==[2 3])&(n==1|n>3), start = start'; end; +[m,n]=size(stop ); if any(m==[2 3])&(n==1|n>3), stop = stop'; end; +[m,n]=size(crossdir); if any(m==[2 3])&(n==1|n>3), crossdir = crossdir'; end; + +% convert strings to numbers +if ~isempty(ends) & isstr(ends), + endsorig = ends; + [m,n] = size(ends); + col = lower([ends(:,1:min(3,n)) ones(m,max(0,3-n))*' ']); + ends = NaN*ones(m,1); + oo = ones(1,m); + ii=find(all(col'==['non']'*oo)'); if ~isempty(ii), ends(ii)=ones(length(ii),1)*0; end; + ii=find(all(col'==['sto']'*oo)'); if ~isempty(ii), ends(ii)=ones(length(ii),1)*1; end; + ii=find(all(col'==['sta']'*oo)'); if ~isempty(ii), ends(ii)=ones(length(ii),1)*2; end; + ii=find(all(col'==['bot']'*oo)'); if ~isempty(ii), ends(ii)=ones(length(ii),1)*3; end; + if any(isnan(ends)), + ii = min(find(isnan(ends))); + error([upper(mfilename) ' does not recognize ' deblank(endsorig(ii,:)) ' as a valid Ends value.']); + end; +else, + ends = ends(:); +end; +if ~isempty(ispatch) & isstr(ispatch), + col = lower(ispatch(:,1)); + patchchar='p'; linechar='l'; defchar=' '; + mask = col~=patchchar & col~=linechar & col~=defchar; + if any(mask) + error([upper(mfilename) ' does not recognize ' deblank(ispatch(min(find(mask)),:)) ' as a valid Type value.']); + end; + ispatch = (col==patchchar)*1 + (col==linechar)*0 + (col==defchar)*defispatch; +else, + ispatch = ispatch(:); +end; +oldh = oldh(:); + +% check object handles +if ~all(ishandle(oldh)), error([upper(mfilename) ' got invalid object handles.']); end; + +% expand root, figure, and axes handles +if ~isempty(oldh), + ohtype = get(oldh,'Type'); + mask = strcmp(ohtype,'root') | strcmp(ohtype,'figure') | strcmp(ohtype,'axes'); + if any(mask), + oldh = num2cell(oldh); + for ii=find(mask)', + oldh(ii) = {findobj(oldh{ii},'Tag',ArrowTag)}; + end; + oldh = cat(1,oldh{:}); + if isempty(oldh), return; end; % no arrows to modify, so just leave + end; +end; + +% largest argument length +[mstart,junk]=size(start); [mstop,junk]=size(stop); [mcrossdir,junk]=size(crossdir); +argsizes = [length(oldh) mstart mstop ... + length(len) length(baseangle) length(tipangle) ... + length(wid) length(page) mcrossdir length(ends) ]; +args=['length(ObjectHandle) '; ... + '#rows(Start) '; ... + '#rows(Stop) '; ... + 'length(Length) '; ... + 'length(BaseAngle) '; ... + 'length(TipAngle) '; ... + 'length(Width) '; ... + 'length(Page) '; ... + '#rows(CrossDir) '; ... 
+ '#rows(Ends) ']; +if (any(imag(crossdir(:))~=0)), + args(9,:) = '#rows(NormalDir) '; +end; +if isempty(oldh), + narrows = max(argsizes); +else, + narrows = length(oldh); +end; +if (narrows<=0), narrows=1; end; + +% Check size of arguments +ii = find((argsizes~=0)&(argsizes~=1)&(argsizes~=narrows)); +if ~isempty(ii), + s = args(ii',:); + while ((size(s,2)>1)&((abs(s(:,size(s,2)))==0)|(abs(s(:,size(s,2)))==abs(' ')))), + s = s(:,1:size(s,2)-1); + end; + s = [ones(length(ii),1)*[upper(mfilename) ' requires that '] s ... + ones(length(ii),1)*[' equal the # of arrows (' num2str(narrows) ').' c]]; + s = s'; + s = s(:)'; + s = s(1:length(s)-1); + error(setstr(s)); +end; + +% check element length in Start, Stop, and CrossDir +if ~isempty(start), + [m,n] = size(start); + if (n==2), + start = [start NaN*ones(m,1)]; + elseif (n~=3), + error([upper(mfilename) ' requires 2- or 3-element Start points.']); + end; +end; +if ~isempty(stop), + [m,n] = size(stop); + if (n==2), + stop = [stop NaN*ones(m,1)]; + elseif (n~=3), + error([upper(mfilename) ' requires 2- or 3-element Stop points.']); + end; +end; +if ~isempty(crossdir), + [m,n] = size(crossdir); + if (n<3), + crossdir = [crossdir NaN*ones(m,3-n)]; + elseif (n~=3), + if (all(imag(crossdir(:))==0)), + error([upper(mfilename) ' requires 2- or 3-element CrossDir vectors.']); + else, + error([upper(mfilename) ' requires 2- or 3-element NormalDir vectors.']); + end; + end; +end; + +% fill empty arguments +if isempty(start ), start = [Inf Inf Inf]; end; +if isempty(stop ), stop = [Inf Inf Inf]; end; +if isempty(len ), len = Inf; end; +if isempty(baseangle ), baseangle = Inf; end; +if isempty(tipangle ), tipangle = Inf; end; +if isempty(wid ), wid = Inf; end; +if isempty(page ), page = Inf; end; +if isempty(crossdir ), crossdir = [Inf Inf Inf]; end; +if isempty(ends ), ends = Inf; end; +if isempty(ispatch ), ispatch = Inf; end; + +% expand single-column arguments +o = ones(narrows,1); +if (size(start ,1)==1), start = o * start ; end; +if (size(stop ,1)==1), stop = o * stop ; end; +if (length(len )==1), len = o * len ; end; +if (length(baseangle )==1), baseangle = o * baseangle ; end; +if (length(tipangle )==1), tipangle = o * tipangle ; end; +if (length(wid )==1), wid = o * wid ; end; +if (length(page )==1), page = o * page ; end; +if (size(crossdir ,1)==1), crossdir = o * crossdir ; end; +if (length(ends )==1), ends = o * ends ; end; +if (length(ispatch )==1), ispatch = o * ispatch ; end; +ax = o * gca; + +% if we've got handles, get the defaults from the handles +if ~isempty(oldh), + for k=1:narrows, + oh = oldh(k); + ud = get(oh,'UserData'); + ax(k) = get(oh,'Parent'); + ohtype = get(oh,'Type'); + if strcmp(get(oh,'Tag'),ArrowTag), % if it's an arrow already + if isinf(ispatch(k)), ispatch(k)=strcmp(ohtype,'patch'); end; + % arrow UserData format: [start' stop' len base tip wid page crossdir' ends] + start0 = ud(1:3); + stop0 = ud(4:6); + if (isinf(len(k))), len(k) = ud( 7); end; + if (isinf(baseangle(k))), baseangle(k) = ud( 8); end; + if (isinf(tipangle(k))), tipangle(k) = ud( 9); end; + if (isinf(wid(k))), wid(k) = ud(10); end; + if (isinf(page(k))), page(k) = ud(11); end; + if (isinf(crossdir(k,1))), crossdir(k,1) = ud(12); end; + if (isinf(crossdir(k,2))), crossdir(k,2) = ud(13); end; + if (isinf(crossdir(k,3))), crossdir(k,3) = ud(14); end; + if (isinf(ends(k))), ends(k) = ud(15); end; + elseif strcmp(ohtype,'line')|strcmp(ohtype,'patch'), % it's a non-arrow line or patch + convLineToPatch = 1; %set to make arrow patches when converting from 
lines. + if isinf(ispatch(k)), ispatch(k)=convLineToPatch|strcmp(ohtype,'patch'); end; + x=get(oh,'XData'); x=x(~isnan(x(:))); if isempty(x), x=NaN; end; + y=get(oh,'YData'); y=y(~isnan(y(:))); if isempty(y), y=NaN; end; + z=get(oh,'ZData'); z=z(~isnan(z(:))); if isempty(z), z=NaN; end; + start0 = [x(1) y(1) z(1) ]; + stop0 = [x(end) y(end) z(end)]; + else, + error([upper(mfilename) ' cannot convert ' ohtype ' objects.']); + end; + ii=find(isinf(start(k,:))); if ~isempty(ii), start(k,ii)=start0(ii); end; + ii=find(isinf(stop( k,:))); if ~isempty(ii), stop( k,ii)=stop0( ii); end; + end; +end; + +% convert Inf's to NaN's +start( isinf(start )) = NaN; +stop( isinf(stop )) = NaN; +len( isinf(len )) = NaN; +baseangle( isinf(baseangle)) = NaN; +tipangle( isinf(tipangle )) = NaN; +wid( isinf(wid )) = NaN; +page( isinf(page )) = NaN; +crossdir( isinf(crossdir )) = NaN; +ends( isinf(ends )) = NaN; +ispatch( isinf(ispatch )) = NaN; + +% set up the UserData data (here so not corrupted by log10's and such) +ud = [start stop len baseangle tipangle wid page crossdir ends]; + +% Set Page defaults +%page = (~isnan(page))&(page); +if isnan(page) + page = 0; +end + +% Get axes limits, range, min; correct for aspect ratio and log scale +axm = zeros(3,narrows); +axr = zeros(3,narrows); +axrev = zeros(3,narrows); +ap = zeros(2,narrows); +xyzlog = zeros(3,narrows); +limmin = zeros(2,narrows); +limrange = zeros(2,narrows); +oldaxlims = zeros(narrows,7); +oneax = all(ax==ax(1)); +if (oneax), + T = zeros(4,4); + invT = zeros(4,4); +else, + T = zeros(16,narrows); + invT = zeros(16,narrows); +end; +axnotdone = logical(ones(size(ax))); +while (any(axnotdone)), + ii = min(find(axnotdone)); + curax = ax(ii); + curpage = page(ii); + % get axes limits and aspect ratio + axl = [get(curax,'XLim'); get(curax,'YLim'); get(curax,'ZLim')]; + oldaxlims(min(find(oldaxlims(:,1)==0)),:) = [curax reshape(axl',1,6)]; + % get axes size in pixels (points) + u = get(curax,'Units'); + axposoldunits = get(curax,'Position'); + really_curpage = curpage & strcmp(u,'normalized'); + if (really_curpage), + curfig = get(curax,'Parent'); + pu = get(curfig,'PaperUnits'); + set(curfig,'PaperUnits','points'); + pp = get(curfig,'PaperPosition'); + set(curfig,'PaperUnits',pu); + set(curax,'Units','pixels'); + curapscreen = get(curax,'Position'); + set(curax,'Units','normalized'); + curap = pp.*get(curax,'Position'); + else, + set(curax,'Units','pixels'); + curapscreen = get(curax,'Position'); + curap = curapscreen; + end; + set(curax,'Units',u); + set(curax,'Position',axposoldunits); + % handle non-stretched axes position + str_stretch = { 'DataAspectRatioMode' ; ... + 'PlotBoxAspectRatioMode' ; ... + 'CameraViewAngleMode' }; + str_camera = { 'CameraPositionMode' ; ... + 'CameraTargetMode' ; ... + 'CameraViewAngleMode' ; ... + 'CameraUpVectorMode' }; + notstretched = strcmp(get(curax,str_stretch),'manual'); + manualcamera = strcmp(get(curax,str_camera),'manual'); + if ~arrow_WarpToFill(notstretched,manualcamera,curax), + % give a warning that this has not been thoroughly tested + if 0 & ARROW_STRETCH_WARN, + ARROW_STRETCH_WARN = 0; + strs = {str_stretch{1:2},str_camera{:}}; + strs = [char(ones(length(strs),1)*sprintf('\n ')) char(strs)]'; + warning([upper(mfilename) ' may not yet work quite right ' ... + 'if any of the following are ''manual'':' strs(:).']); + end; + % find the true pixel size of the actual axes + texttmp = text(axl(1,[1 2 2 1 1 2 2 1]), ... + axl(2,[1 1 2 2 1 1 2 2]), ... 
+ axl(3,[1 1 1 1 2 2 2 2]),''); + set(texttmp,'Units','points'); + textpos = get(texttmp,'Position'); + delete(texttmp); + textpos = cat(1,textpos{:}); + textpos = max(textpos(:,1:2)) - min(textpos(:,1:2)); + % adjust the axes position + if (really_curpage), + % adjust to printed size + textpos = textpos * min(curap(3:4)./textpos); + curap = [curap(1:2)+(curap(3:4)-textpos)/2 textpos]; + else, + % adjust for pixel roundoff + textpos = textpos * min(curapscreen(3:4)./textpos); + curap = [curap(1:2)+(curap(3:4)-textpos)/2 textpos]; + end; + end; + if ARROW_PERSP_WARN & ~strcmp(get(curax,'Projection'),'orthographic'), + ARROW_PERSP_WARN = 0; + warning([upper(mfilename) ' does not yet work right for 3-D perspective projection.']); + end; + % adjust limits for log scale on axes + curxyzlog = [strcmp(get(curax,'XScale'),'log'); ... + strcmp(get(curax,'YScale'),'log'); ... + strcmp(get(curax,'ZScale'),'log')]; + if (any(curxyzlog)), + ii = find([curxyzlog;curxyzlog]); + if (any(axl(ii)<=0)), + error([upper(mfilename) ' does not support non-positive limits on log-scaled axes.']); + else, + axl(ii) = log10(axl(ii)); + end; + end; + % correct for 'reverse' direction on axes; + curreverse = [strcmp(get(curax,'XDir'),'reverse'); ... + strcmp(get(curax,'YDir'),'reverse'); ... + strcmp(get(curax,'ZDir'),'reverse')]; + ii = find(curreverse); + if ~isempty(ii), + axl(ii,[1 2])=-axl(ii,[2 1]); + end; + % compute the range of 2-D values + curT = get(curax,'Xform'); + lim = curT*[0 1 0 1 0 1 0 1;0 0 1 1 0 0 1 1;0 0 0 0 1 1 1 1;1 1 1 1 1 1 1 1]; + lim = lim(1:2,:)./([1;1]*lim(4,:)); + curlimmin = min(lim')'; + curlimrange = max(lim')' - curlimmin; + curinvT = inv(curT); + if (~oneax), + curT = curT.'; + curinvT = curinvT.'; + curT = curT(:); + curinvT = curinvT(:); + end; + % check which arrows to which cur corresponds + ii = find((ax==curax)&(page==curpage)); + oo = ones(1,length(ii)); + axr(:,ii) = diff(axl')' * oo; + axm(:,ii) = axl(:,1) * oo; + axrev(:,ii) = curreverse * oo; + ap(:,ii) = curap(3:4)' * oo; + xyzlog(:,ii) = curxyzlog * oo; + limmin(:,ii) = curlimmin * oo; + limrange(:,ii) = curlimrange * oo; + if (oneax), + T = curT; + invT = curinvT; + else, + T(:,ii) = curT * oo; + invT(:,ii) = curinvT * oo; + end; + axnotdone(ii) = zeros(1,length(ii)); +end; +oldaxlims(oldaxlims(:,1)==0,:)=[]; + +% correct for log scales +curxyzlog = xyzlog.'; +ii = find(curxyzlog(:)); +if ~isempty(ii), + start( ii) = real(log10(start( ii))); + stop( ii) = real(log10(stop( ii))); + if (all(imag(crossdir)==0)), % pulled (ii) subscript on crossdir, 12/5/96 eaj + crossdir(ii) = real(log10(crossdir(ii))); + end; +end; + +% correct for reverse directions +ii = find(axrev.'); +if ~isempty(ii), + start( ii) = -start( ii); + stop( ii) = -stop( ii); + crossdir(ii) = -crossdir(ii); +end; + +% transpose start/stop values +start = start.'; +stop = stop.'; + +% take care of defaults, page was done above +ii=find(isnan(start(:) )); if ~isempty(ii), start(ii) = axm(ii)+axr(ii)/2; end; +ii=find(isnan(stop(:) )); if ~isempty(ii), stop(ii) = axm(ii)+axr(ii)/2; end; +ii=find(isnan(crossdir(:) )); if ~isempty(ii), crossdir(ii) = zeros(length(ii),1); end; +ii=find(isnan(len )); if ~isempty(ii), len(ii) = ones(length(ii),1)*deflen; end; +ii=find(isnan(baseangle )); if ~isempty(ii), baseangle(ii) = ones(length(ii),1)*defbaseangle; end; +ii=find(isnan(tipangle )); if ~isempty(ii), tipangle(ii) = ones(length(ii),1)*deftipangle; end; +ii=find(isnan(wid )); if ~isempty(ii), wid(ii) = ones(length(ii),1)*defwid; end; +ii=find(isnan(ends )); if 
~isempty(ii), ends(ii) = ones(length(ii),1)*defends; end; + +% transpose rest of values +len = len.'; +baseangle = baseangle.'; +tipangle = tipangle.'; +wid = wid.'; +page = page.'; +crossdir = crossdir.'; +ends = ends.'; +ax = ax.'; + +% given x, a 3xN matrix of points in 3-space; +% want to convert to X, the corresponding 4xN 2-space matrix +% +% tmp1=[(x-axm)./axr; ones(1,size(x,1))]; +% if (oneax), X=T*tmp1; +% else, tmp1=[tmp1;tmp1;tmp1;tmp1]; tmp1=T.*tmp1; +% tmp2=zeros(4,4*N); tmp2(:)=tmp1(:); +% X=zeros(4,N); X(:)=sum(tmp2)'; end; +% X = X ./ (ones(4,1)*X(4,:)); + +% for all points with start==stop, start=stop-(verysmallvalue)*(up-direction); +ii = find(all(start==stop)); +if ~isempty(ii), + % find an arrowdir vertical on screen and perpendicular to viewer + % transform to 2-D + tmp1 = [(stop(:,ii)-axm(:,ii))./axr(:,ii);ones(1,length(ii))]; + if (oneax), twoD=T*tmp1; + else, tmp1=[tmp1;tmp1;tmp1;tmp1]; tmp1=T(:,ii).*tmp1; + tmp2=zeros(4,4*length(ii)); tmp2(:)=tmp1(:); + twoD=zeros(4,length(ii)); twoD(:)=sum(tmp2)'; end; + twoD=twoD./(ones(4,1)*twoD(4,:)); + % move the start point down just slightly + tmp1 = twoD + [0;-1/1000;0;0]*(limrange(2,ii)./ap(2,ii)); + % transform back to 3-D + if (oneax), threeD=invT*tmp1; + else, tmp1=[tmp1;tmp1;tmp1;tmp1]; tmp1=invT(:,ii).*tmp1; + tmp2=zeros(4,4*length(ii)); tmp2(:)=tmp1(:); + threeD=zeros(4,length(ii)); threeD(:)=sum(tmp2)'; end; + start(:,ii) = (threeD(1:3,:)./(ones(3,1)*threeD(4,:))).*axr(:,ii)+axm(:,ii); +end; + +% compute along-arrow points +% transform Start points + tmp1=[(start-axm)./axr;ones(1,narrows)]; + if (oneax), X0=T*tmp1; + else, tmp1=[tmp1;tmp1;tmp1;tmp1]; tmp1=T.*tmp1; + tmp2=zeros(4,4*narrows); tmp2(:)=tmp1(:); + X0=zeros(4,narrows); X0(:)=sum(tmp2)'; end; + X0=X0./(ones(4,1)*X0(4,:)); +% transform Stop points + tmp1=[(stop-axm)./axr;ones(1,narrows)]; + if (oneax), Xf=T*tmp1; + else, tmp1=[tmp1;tmp1;tmp1;tmp1]; tmp1=T.*tmp1; + tmp2=zeros(4,4*narrows); tmp2(:)=tmp1(:); + Xf=zeros(4,narrows); Xf(:)=sum(tmp2)'; end; + Xf=Xf./(ones(4,1)*Xf(4,:)); +% compute pixel distance between points + D = sqrt(sum(((Xf(1:2,:)-X0(1:2,:)).*(ap./limrange)).^2)); +% compute and modify along-arrow distances + len1 = len; + len2 = len - (len.*tan(tipangle/180*pi)-wid/2).*tan((90-baseangle)/180*pi); + slen0 = zeros(1,narrows); + slen1 = len1 .* ((ends==2)|(ends==3)); + slen2 = len2 .* ((ends==2)|(ends==3)); + len0 = zeros(1,narrows); + len1 = len1 .* ((ends==1)|(ends==3)); + len2 = len2 .* ((ends==1)|(ends==3)); + % for no start arrowhead + ii=find((ends==1)&(D0), set(H,extraprops{:}); end; + % handle choosing arrow Start and/or Stop locations if unspecified + [H,oldaxlims,errstr] = arrow_clicks(H,ud,x,y,z,ax,oldaxlims); + if ~isempty(errstr), error([upper(mfilename) ' got ' errstr]); end; + % set the output + if (nargout>0), h=H; end; + % make sure the axis limits did not change + if isempty(oldaxlims), + ARROW_AXLIMITS = []; + else, + lims = get(oldaxlims(:,1),{'XLim','YLim','ZLim'})'; + lims = reshape(cat(2,lims{:}),6,size(lims,2)); + %mask = arrow_is2DXY(oldaxlims(:,1)); + %oldaxlims(mask,6:7) = lims(5:6,mask)'; + ARROW_AXLIMITS = oldaxlims(find(any(oldaxlims(:,2:7)'~=lims)),:); + if ~isempty(ARROW_AXLIMITS), + warning(arrow_warnlimits(ARROW_AXLIMITS,narrows)); + end; + end; +else, + % don't create the patch, just return the data + h=x; + yy=y; + zz=z; +end; + + + +function out = arrow_defcheck(in,def,prop) +% check if we got 'default' values + out = in; + if ~isstr(in), return; end; + if size(in,1)==1 & strncmp(lower(in),'def',3), + out = 
def; + elseif ~isempty(prop), + error([upper(mfilename) ' does not recognize ''' in(:)' ''' as a valid ''' prop ''' string.']); + end; + + + +function [H,oldaxlims,errstr] = arrow_clicks(H,ud,x,y,z,ax,oldaxlims) +% handle choosing arrow Start and/or Stop locations if necessary + errstr = ''; + if isempty(H)|isempty(ud)|isempty(x), return; end; + % determine which (if any) need Start and/or Stop + needStart = all(isnan(ud(:,1:3)'))'; + needStop = all(isnan(ud(:,4:6)'))'; + mask = any(needStart|needStop); + if ~any(mask), return; end; + ud(~mask,:)=[]; ax(:,~mask)=[]; + x(:,~mask)=[]; y(:,~mask)=[]; z(:,~mask)=[]; + % make them invisible for the time being + set(H,'Visible','off'); + % save the current axes and limits modes; set to manual for the time being + oldAx = gca; + limModes=get(ax(:),{'XLimMode','YLimMode','ZLimMode'}); + set(ax(:),{'XLimMode','YLimMode','ZLimMode'},{'manual','manual','manual'}); + % loop over each arrow that requires attention + jj = find(mask); + for ii=1:length(jj), + h = H(jj(ii)); + axes(ax(ii)); + % figure out correct call + if needStart(ii), prop='Start'; else, prop='Stop'; end; + [wasInterrupted,errstr] = arrow_click(needStart(ii)&needStop(ii),h,prop,ax(ii)); + % handle errors and control-C + if wasInterrupted, + delete(H(jj(ii:end))); + H(jj(ii:end))=[]; + oldaxlims(jj(ii:end),:)=[]; + break; + end; + end; + % restore the axes and limit modes + axes(oldAx); + set(ax(:),{'XLimMode','YLimMode','ZLimMode'},limModes); + +function [wasInterrupted,errstr] = arrow_click(lockStart,H,prop,ax) +% handle the clicks for one arrow + fig = get(ax,'Parent'); + % save some things + oldFigProps = {'Pointer','WindowButtonMotionFcn','WindowButtonUpFcn'}; + oldFigValue = get(fig,oldFigProps); + oldArrowProps = {'EraseMode'}; + oldArrowValue = get(H,oldArrowProps); + set(H,'EraseMode','background'); %because 'xor' makes shaft invisible unless Width>1 + global ARROW_CLICK_H ARROW_CLICK_PROP ARROW_CLICK_AX ARROW_CLICK_USE_Z + ARROW_CLICK_H=H; ARROW_CLICK_PROP=prop; ARROW_CLICK_AX=ax; + ARROW_CLICK_USE_Z=~arrow_is2DXY(ax)|~arrow_planarkids(ax); + set(fig,'Pointer','crosshair'); + % set up the WindowButtonMotion so we can see the arrow while moving around + set(fig,'WindowButtonUpFcn','set(gcf,''WindowButtonUpFcn'','''')', ... 
+ 'WindowButtonMotionFcn',''); + if ~lockStart, + set(H,'Visible','on'); + set(fig,'WindowButtonMotionFcn',[mfilename '(''callback'',''motion'');']); + end; + % wait for the button to be pressed + [wasKeyPress,wasInterrupted,errstr] = arrow_wfbdown(fig); + % if we wanted to click-drag, set the Start point + if lockStart & ~wasInterrupted, + pt = arrow_point(ARROW_CLICK_AX,ARROW_CLICK_USE_Z); + feval(mfilename,H,'Start',pt,'Stop',pt); + set(H,'Visible','on'); + ARROW_CLICK_PROP='Stop'; + set(fig,'WindowButtonMotionFcn',[mfilename '(''callback'',''motion'');']); + % wait for the mouse button to be released + eval('waitfor(fig,''WindowButtonUpFcn'','''');','wasInterrupted=1;'); + if wasInterrupted, errstr=lasterr; end; + end; + if ~wasInterrupted, feval(mfilename,'callback','motion'); end; + % restore some things + set(gcf,oldFigProps,oldFigValue); + set(H,oldArrowProps,oldArrowValue); + +function arrow_callback(varargin) +% handle redrawing callbacks + if nargin==0, return; end; + str = varargin{1}; + if ~isstr(str), error([upper(mfilename) ' got an invalid Callback command.']); end; + s = lower(str); + if strcmp(s,'motion'), + % motion callback + global ARROW_CLICK_H ARROW_CLICK_PROP ARROW_CLICK_AX ARROW_CLICK_USE_Z + feval(mfilename,ARROW_CLICK_H,ARROW_CLICK_PROP,arrow_point(ARROW_CLICK_AX,ARROW_CLICK_USE_Z)); + drawnow; + else, + error([upper(mfilename) ' does not recognize ''' str(:).' ''' as a valid Callback option.']); + end; + +function out = arrow_point(ax,use_z) +% return the point on the given axes + if nargin==0, ax=gca; end; + if nargin<2, use_z=~arrow_is2DXY(ax)|~arrow_planarkids(ax); end; + out = get(ax,'CurrentPoint'); + out = out(1,:); + if ~use_z, out=out(1:2); end; + +function [wasKeyPress,wasInterrupted,errstr] = arrow_wfbdown(fig) +% wait for button down ignoring object ButtonDownFcn's + if nargin==0, fig=gcf; end; + errstr = ''; + % save ButtonDownFcn values + objs = findobj(fig); + buttonDownFcns = get(objs,'ButtonDownFcn'); + mask=~strcmp(buttonDownFcns,''); objs=objs(mask); buttonDownFcns=buttonDownFcns(mask); + set(objs,'ButtonDownFcn',''); + % save other figure values + figProps = {'KeyPressFcn','WindowButtonDownFcn'}; + figValue = get(fig,figProps); + % do the real work + set(fig,'KeyPressFcn','set(gcf,''KeyPressFcn'','''',''WindowButtonDownFcn'','''');', ... + 'WindowButtonDownFcn','set(gcf,''WindowButtonDownFcn'','''')'); + lasterr(''); + wasInterrupted=0; eval('waitfor(fig,''WindowButtonDownFcn'','''');','wasInterrupted=1;'); + wasKeyPress = ~wasInterrupted & strcmp(get(fig,'KeyPressFcn'),''); + if wasInterrupted, errstr=lasterr; end; + % restore ButtonDownFcn and other figure values + set(objs,'ButtonDownFcn',buttonDownFcns); + set(fig,figProps,figValue); + + + +function [out,is2D] = arrow_is2DXY(ax) +% check if axes are 2-D X-Y plots + % may not work for modified camera angles, etc. 
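 % Illustrative note (an assumed example, not from the original source): a
 % default 2-D axes reports View = [0 90], so the elevation test below marks
 % it as an X-Y plot, e.g.
 %   figure; plot(1:10); v = get(gca,'View');   % v is [0 90] for a flat plot
 %   isXY = abs(v(2)) == 90;                    % same check as used here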
+ out = zeros(size(ax)); % 2-D X-Y plots + is2D = out; % any 2-D plots + views = get(ax(:),{'View'}); + views = cat(1,views{:}); + out(:) = abs(views(:,2))==90; + is2D(:) = out(:) | all(rem(views',90)==0)'; + +function out = arrow_planarkids(ax) +% check if axes descendents all have empty ZData (lines,patches,surfaces) + out = logical(ones(size(ax))); + allkids = get(ax(:),{'Children'}); + for k=1:length(allkids), + kids = get([findobj(allkids{k},'flat','Type','line') + findobj(allkids{k},'flat','Type','patch') + findobj(allkids{k},'flat','Type','surface')],{'ZData'}); + for j=1:length(kids), + if ~isempty(kids{j}), out(k)=logical(0); break; end; + end; + end; + + + +function arrow_fixlimits(axlimits) +% reset the axis limits as necessary + if isempty(axlimits), disp([upper(mfilename) ' does not remember any axis limits to reset.']); end; + for k=1:size(axlimits,1), + if any(get(axlimits(k,1),'XLim')~=axlimits(k,2:3)), set(axlimits(k,1),'XLim',axlimits(k,2:3)); end; + if any(get(axlimits(k,1),'YLim')~=axlimits(k,4:5)), set(axlimits(k,1),'YLim',axlimits(k,4:5)); end; + if any(get(axlimits(k,1),'ZLim')~=axlimits(k,6:7)), set(axlimits(k,1),'ZLim',axlimits(k,6:7)); end; + end; + + + +function out = arrow_WarpToFill(notstretched,manualcamera,curax) +% check if we are in "WarpToFill" mode. + out = strcmp(get(curax,'WarpToFill'),'on'); + % 'WarpToFill' is undocumented, so may need to replace this by + % out = ~( any(notstretched) & any(manualcamera) ); + + + +function out = arrow_warnlimits(axlimits,narrows) +% create a warning message if we've changed the axis limits + msg = ''; + switch (size(axlimits,1)==1) + case 1, msg=''; + case 2, msg='on two axes '; + otherwise, msg='on several axes '; + end; + msg = [upper(mfilename) ' changed the axis limits ' msg ... + 'when adding the arrow']; + if (narrows>1), msg=[msg 's']; end; + out = [msg '.' sprintf('\n') ' Call ' upper(mfilename) ... + ' FIXLIMITS to reset them now.']; + + + +function arrow_copyprops(fm,to) +% copy line properties to patches + props = {'EraseMode','LineStyle','LineWidth','Marker','MarkerSize',... + 'MarkerEdgeColor','MarkerFaceColor','ButtonDownFcn', ... + 'Clipping','DeleteFcn','BusyAction','HandleVisibility', ... + 'Selected','SelectionHighlight','Visible'}; + lineprops = {'Color', props{:}}; + patchprops = {'EdgeColor',props{:}}; + patch2props = {'FaceColor',patchprops{:}}; + fmpatch = strcmp(get(fm,'Type'),'patch'); + topatch = strcmp(get(to,'Type'),'patch'); + set(to( fmpatch& topatch),patch2props,get(fm( fmpatch& topatch),patch2props)); %p->p + set(to(~fmpatch&~topatch),lineprops, get(fm(~fmpatch&~topatch),lineprops )); %l->l + set(to( fmpatch&~topatch),lineprops, get(fm( fmpatch&~topatch),patchprops )); %p->l + set(to(~fmpatch& topatch),patchprops, get(fm(~fmpatch& topatch),lineprops) ,'FaceColor','none'); %l->p + + + +function arrow_props +% display further help info about ARROW properties + c = sprintf('\n'); + disp([c ... + 'ARROW Properties: Default values are given in [square brackets], and other' c ... + ' acceptable equivalent property names are in (parenthesis).' c c ... + ' Start The starting points. For N arrows, B' c ... + ' this should be a Nx2 or Nx3 matrix. /|\ ^' c ... + ' Stop The end points. For N arrows, this /|||\ |' c ... + ' should be a Nx2 or Nx3 matrix. //|||\\ L|' c ... + ' Length Length of the arrowhead (in pixels on ///|||\\\ e|' c ... + ' screen, points on a page). [16] (Len) ////|||\\\\ n|' c ... + ' BaseAngle Angle (degrees) of the base angle /////|D|\\\\\ g|' c ... + ' ADE. 
For a simple stick arrow, use //// ||| \\\\ t|' c ... + ' BaseAngle=TipAngle. [90] (Base) /// ||| \\\ h|' c ... + ' TipAngle Angle (degrees) of tip angle ABC. //<----->|| \\ |' c ... + ' [16] (Tip) / base ||| \ V' c ... + ' Width Width of the base in pixels. Not E angle ||<-------->C' c ... + ' the ''LineWidth'' prop. [0] (Wid) |||tipangle' c ... + ' Page If provided, non-empty, and not NaN, |||' c ... + ' this causes ARROW to use hardcopy |||' c ... + ' rather than onscreen proportions. A' c ... + ' This is important if screen aspect --> <-- width' c ... + ' ratio and hardcopy aspect ratio are ----CrossDir---->' c ... + ' vastly different. []' c... + ' CrossDir A vector giving the direction towards which the fletches' c ... + ' on the arrow should go. [computed such that it is perpen-' c ... + ' dicular to both the arrow direction and the view direction' c ... + ' (i.e., as if it was pasted on a normal 2-D graph)] (Note' c ... + ' that CrossDir is a vector. Also note that if an axis is' c ... + ' plotted on a log scale, then the corresponding component' c ... + ' of CrossDir must also be set appropriately, i.e., to 1 for' c ... + ' no change in that direction, >1 for a positive change, >0' c ... + ' and <1 for negative change.)' c ... + ' NormalDir A vector normal to the fletch direction (CrossDir is then' c ... + ' computed by the vector cross product [Line]x[NormalDir]). []' c ... + ' (Note that NormalDir is a vector. Unlike CrossDir,' c ... + ' NormalDir is used as is regardless of log-scaled axes.)' c ... + ' Ends Set which end has an arrowhead. Valid values are ''none'',' c ... + ' ''stop'', ''start'', and ''both''. [''stop''] (End)' c... + ' ObjectHandles Vector of handles to previously-created arrows to be' c ... + ' updated or line objects to be converted to arrows.' c ... + ' [] (Object,Handle)' c ]); + + + +function out = arrow_demo + % demo + % create the data + [x,y,z] = peaks; + [ddd,out.iii]=max(z(:)); + out.axlim = [min(x(:)) max(x(:)) min(y(:)) max(y(:)) min(z(:)) max(z(:))]; + + % modify it by inserting some NaN's + [m,n] = size(z); + m = floor(m/2); + n = floor(n/2); + z(1:m,1:n) = NaN*ones(m,n); + + % graph it + clf('reset'); + out.hs=surf(x,y,z); + out.x=x; out.y=y; out.z=z; + xlabel('x'); ylabel('y'); + +function h = arrow_demo3(in) + % set the view + axlim = in.axlim; + axis(axlim); + zlabel('z'); + %set(in.hs,'FaceColor','interp'); + view(viewmtx(-37.5,30,20)); + title(['Demo of the capabilities of the ARROW function in 3-D']); + + % Normal blue arrow + h1 = feval(mfilename,[axlim(1) axlim(4) 4],[-.8 1.2 4], ... + 'EdgeColor','b','FaceColor','b'); + + % Normal white arrow, clipped by the surface + h2 = feval(mfilename,axlim([1 4 6]),[0 2 4]); + t=text(-2.4,2.7,7.7,'arrow clipped by surf'); + + % Baseangle<90 + h3 = feval(mfilename,[3 .125 3.5],[1.375 0.125 3.5],30,50); + t2=text(3.1,.125,3.5,'local maximum'); + + % Baseangle<90, fill and edge colors different + h4 = feval(mfilename,axlim(1:2:5)*.5,[0 0 0],36,60,25, ... + 'EdgeColor','b','FaceColor','c'); + t3=text(axlim(1)*.5,axlim(3)*.5,axlim(5)*.5-.75,'origin'); + set(t3,'HorizontalAlignment','center'); + + % Baseangle>90, black fill + h5 = feval(mfilename,[-2.9 2.9 3],[-1.3 .4 3.2],30,120,[],6, ... + 'EdgeColor','r','FaceColor','k','LineWidth',2); + + % Baseangle>90, no fill + h6 = feval(mfilename,[-2.9 2.9 1.3],[-1.3 .4 1.5],30,120,[],6, ... 
+ 'EdgeColor','r','FaceColor','none','LineWidth',2); + + % Stick arrow + h7 = feval(mfilename,[-1.6 -1.65 -6.5],[0 -1.65 -6.5],[],16,16); + t4=text(-1.5,-1.65,-7.25,'global mininum'); + set(t4,'HorizontalAlignment','center'); + + % Normal, black fill + h8 = feval(mfilename,[-1.4 0 -7.2],[-1.4 0 -3],'FaceColor','k'); + t5=text(-1.5,0,-7.75,'local minimum'); + set(t5,'HorizontalAlignment','center'); + + % Gray fill, crossdir specified, 'LineStyle' -- + h9 = feval(mfilename,[-3 2.2 -6],[-3 2.2 -.05],36,[],27,6,[],[0 -1 0], ... + 'EdgeColor','k','FaceColor',.75*[1 1 1],'LineStyle','--'); + + % a series of normal arrows, linearly spaced, crossdir specified + h10y=(0:4)'/3; + h10 = feval(mfilename,[-3*ones(size(h10y)) h10y -6.5*ones(size(h10y))], ... + [-3*ones(size(h10y)) h10y -.05*ones(size(h10y))], ... + 12,[],[],[],[],[0 -1 0]); + + % a series of normal arrows, linearly spaced + h11x=(1:.33:2.8)'; + h11 = feval(mfilename,[h11x -3*ones(size(h11x)) 6.5*ones(size(h11x))], ... + [h11x -3*ones(size(h11x)) -.05*ones(size(h11x))]); + + % series of magenta arrows, radially oriented, crossdir specified + h12x=2; h12y=-3; h12z=axlim(5)/2; h12xr=1; h12zr=h12z; ir=.15;or=.81; + h12t=(0:11)'/6*pi; + h12 = feval(mfilename, ... + [h12x+h12xr*cos(h12t)*ir h12y*ones(size(h12t)) ... + h12z+h12zr*sin(h12t)*ir],[h12x+h12xr*cos(h12t)*or ... + h12y*ones(size(h12t)) h12z+h12zr*sin(h12t)*or], ... + 10,[],[],[],[], ... + [-h12xr*sin(h12t) zeros(size(h12t)) h12zr*cos(h12t)],... + 'FaceColor','none','EdgeColor','m'); + + % series of normal arrows, tangentially oriented, crossdir specified + or13=.91; h13t=(0:.5:12)'/6*pi; + locs = [h12x+h12xr*cos(h13t)*or13 h12y*ones(size(h13t)) h12z+h12zr*sin(h13t)*or13]; + h13 = feval(mfilename,locs(1:end-1,:),locs(2:end,:),6); + + % arrow with no line ==> oriented downwards + h14 = feval(mfilename,[3 3 .100001],[3 3 .1],30); + t6=text(3,3,3.6,'no line'); set(t6,'HorizontalAlignment','center'); + + % arrow with arrowheads at both ends + h15 = feval(mfilename,[-.5 -3 -3],[1 -3 -3],'Ends','both','FaceColor','g', ... 
+ 'Length',20,'Width',3,'CrossDir',[0 0 1],'TipAngle',25); + + h=[h1;h2;h3;h4;h5;h6;h7;h8;h9;h10;h11;h12;h13;h14;h15]; + +function h = arrow_demo2(in) + axlim = in.axlim; + dolog = 1; + if (dolog), set(in.hs,'YData',10.^get(in.hs,'YData')); end; + shading('interp'); + view(2); + title(['Demo of the capabilities of the ARROW function in 2-D']); + hold on; [C,H]=contour(in.x,in.y,in.z,20,'-'); hold off; + for k=H', + set(k,'ZData',(axlim(6)+1)*ones(size(get(k,'XData'))),'Color','k'); + if (dolog), set(k,'YData',10.^get(k,'YData')); end; + end; + if (dolog), axis([axlim(1:2) 10.^axlim(3:4)]); set(gca,'YScale','log'); + else, axis(axlim(1:4)); end; + + % Normal blue arrow + start = [axlim(1) axlim(4) axlim(6)+2]; + stop = [in.x(in.iii) in.y(in.iii) axlim(6)+2]; + if (dolog), start(:,2)=10.^start(:,2); stop(:,2)=10.^stop(:,2); end; + h1 = feval(mfilename,start,stop,'EdgeColor','b','FaceColor','b'); + + % three arrows with varying fill, width, and baseangle + start = [-3 -3 10; -3 -1.5 10; -1.5 -3 10]; + stop = [-.03 -.03 10; -.03 -1.5 10; -1.5 -.03 10]; + if (dolog), start(:,2)=10.^start(:,2); stop(:,2)=10.^stop(:,2); end; + h2 = feval(mfilename,start,stop,24,[90;60;120],[],[0;0;4],'Ends',str2mat('both','stop','stop')); + set(h2(2),'EdgeColor',[0 .35 0],'FaceColor',[0 .85 .85]); + set(h2(3),'EdgeColor','r','FaceColor',[1 .5 1]); + h=[h1;h2]; diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/GraphViz/dot_to_graph.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/GraphViz/dot_to_graph.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,121 @@ +function [Adj, labels, x, y] = dot_to_graph(filename) +% [Adj, labels, x, y] = dot_to_graph(filename) +% Extract a matrix representation, node labels, and node position coordinates +% from a file in GraphViz format http://www.research.att.com/sw/tools/graphviz +% +% INPUTS: +% 'filename' - the file in DOT format containing the graph layout. +% OUTPUT: +% 'Adj' - an adjacency matrix representation of the graph in 'filename'; +% 'labels' - a character array with the names of the nodes of the graph; +% 'x' - a row vector with the x-coordinates of the nodes in 'filename'; +% 'y' - a row vector with the y-coordinates of the nodes in 'filename'. +% +% WARNINGS: not guaranted to parse ANY GraphViz file. Debugged on undirected +% sample graphs from GraphViz(Heawood, Petersen, ER, ngk10_4, process). +% Complaines about RecursionLimit set only to 500 on huge graphs. +% Ignores singletons (disjoint nodes). +% Sample DOT code "ABC.dot", read by [Adj, labels, x, y] = dot_to_graph('ABC.dot') +% digraph G { +% A [pos="28,31"]; +% B [pos="74,87"]; +% A -- B [pos="e,61,71 41,47 46,53 50,58 55,64"]; +% } +% last modified: Jan 2004 +% by Alexi Savov: asavov @wustl.edu | http://artsci.wustl.edu/~azsavov +% Leon Peshkin: pesha @ai.mit.edu | http://www.ai.mit.edu/~pesha +% Tom Minka + +if ~exist(filename) % Checks whether the specified file exists. + error('* * * File does not exist or could not be found. * * *'); +end; + +lines = textread(filename,'%s','delimiter','\n','commentstyle','c'); % Read file into cell array +dot_lines = strvcat(lines); % of lines, ignoring C-style comments + +if findstr(dot_lines(1,:), 'graph ') == [] % Is this a DOT file ? + error('* * * File does not appear to be in valid DOT format. 
* * *'); +end; + +Nlns = size(dot_lines,1); % The number of lines; +nodes = {}; +unread = 1:Nlns; % 'unread' list of lines which has not been examined yet +edge_id = 1; +Adj = []; +for line_ndx = 1:Nlns % This section sets the adjacency matrix A(Lnode,Rnode) = edge_id. + line = dot_lines(line_ndx,:); + Ddash_pos = strfind(line, ' -- ') + 1; % double dash positions + arrow_pos = strfind(line, ' -> ') + 1; % arrow dash positions + tokens = strread(line,'%s','delimiter',' "'); + left_bound = 1; + for dash_pos = [Ddash_pos arrow_pos]; % if empty - not a POS line + Lnode = sscanf(line(left_bound:dash_pos -2), '%s'); + Rnode = sscanf(line(dash_pos +3 : length(line)-1),'%s',1); + Lndx = strmatch(Lnode, nodes, 'exact'); + Rndx = strmatch(Rnode, nodes, 'exact'); + if isempty(Lndx) % extend our list of nodes + nodes{end+1} = Lnode; + Lndx = length(nodes); + end + if isempty(Rndx) + nodes{end+1} = Rnode; + Rndx = length(nodes); + end + Adj(Lndx, Rndx) = edge_id; + if ismember(dash_pos, Ddash_pos) % The edge is undirected, A(Rndx,LndxL) is also set to 1; + Adj(Rndx, Lndx) = edge_id; + end + edge_id = edge_id + 1; + left_bound = dash_pos + 3; + unread = setdiff(unread, line_ndx); + end +end +Nvrt = length(nodes); % number of vertices we found [Do we ever have singleton vertices ???] +% nodes = strvcat(nodes); % convert to the searchable array +x = zeros(1, Nvrt); +y = zeros(1, Nvrt); +labels = nodes; +% Find node's position coordinates if they are contained in 'filename'. +for line_ndx = unread % Look for node's coordinates among the 'unread' lines. + line = dot_lines(line_ndx,:); + bra_pos = strfind(line, '['); % has to have "[" if it has the label + lst_node = 0; + for node = 1:Nvrt % look through the list of nodes + % THE NEXT STATEMENT we assume no node is substring of any other node + lbl_pos = strfind(line, nodes{node}); + if (~isempty(lbl_pos) & ~isempty(bra_pos) & (x(node) == 0)) % make sure we have not seen it + if (lbl_pos(1) < bra_pos(1)) % label has to be to the left of bracket + lst_node = node; + end + end + end + if lst_node + pos_pos = strfind(line, 'pos'); % position of the "pos" + if ~isempty(pos_pos) % this line contains SOME position + [node_pos] = sscanf(line(pos_pos:end), ' pos = "%d,%d"')'; + x(lst_node) = node_pos(1); + y(lst_node) = node_pos(2); + end + % minka + label_pos = strfind(line, 'label'); % position of the "label" + if ~isempty(label_pos) + label_end = strfind(line(label_pos:end),','); + labels{lst_node} = unquote(line(label_pos+(6:label_end(1)-2))); + end + end +end + +if (isempty(find(x)) & (nargout > 2)) % If coordinates were requested, but not found in 'filename'. + warning('File does not contain node coordinates.'); +end; +if ~(size(Adj,1)==size(Adj,2)) % Make sure Adj is a square matrix. ? 
+ Adj = eye(max(size(Adj)),size(Adj,1))*Adj*eye(size(Adj,2),max(size(Adj))); +end; +x = .9*(x-min(x))/range(x)+.05; % normalise and push off margins +y = .9*(y-min(y))/range(y)+.05; + + + +function s = unquote(s) + +s = strrep(s,'"',''); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/GraphViz/draw_dbn.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/GraphViz/draw_dbn.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,99 @@ +function [x, y, h] = draw_dbn(adj, inter, flip_intra, K, labels, node_t, x, y) +% DRAW_LAYOUT_DBN Draws a layout for a Dynamical Belief Network +% +% [] = DRAW_LAYOUT_DBN(INTRA, INTER, ) +% +% Inputs : +% INTRA, INTER : Adjacency matrices +% FLIP_FLAG : Transposes the DAG layout obtained from INTRA connections +% If X1, Y1 are specified, FLIP_FLAG has no effect. +% K : Unfold K times +% LABELS - if -1, we use 1:N*K +% Rest : See DRAW_LAYOUT +% +% Outputs : +% Xi, Yi : Coordinates of nodes (for i'th timeslice) on the unit square +% H : Object Handles +% +% Usage Example : draw_layout_dbn(intra, inter, 1); +% draw_layout_dbn(intra, inter); +% +% Note : +% See also DRAW_GRAPH + +% Uses : DRAW_GRAPH + +% Change History : +% Date Time Prog Note +% 17-Apr-2000 1:02 PM ATC Created under MATLAB 5.3.1.29215a (R11.1) + +% ATC = Ali Taylan Cemgil, +% SNN - University of Nijmegen, Department of Medical Physics and Biophysics +% e-mail : cemgil@mbfys.kun.nl + +N = size(adj,1); +if nargin<3, + flip_intra = 0; +end; + +if nargin<4, + K = 2; +end; + +if K<2 | K>7, error('2<=K<=7 must hold..'); end; + + +if nargin<5 +% labels = cellstr(char(zeros(N,1)+double('+'))); +% labels = cellstr(int2str((1:N)')); + labels = cellstr(char((0:N-1)'+double('a'))); +end; + +if nargin<6, + node_t = zeros(N,1); +% node_t = rand(N,1) > 0.5; +end; + +if nargin<7, + [x1 y1] = make_layout(adj); + if flip_intra, tmp = x1; x1 = y1; y1 = tmp; end; +end; + +mid = round(K/2); + + +xi = x1(:)-1; +x = []; +y = repmat(y1(:), [K 1]); +node_t2 = repmat(node_t(:), [K 1]); + +if isa(labels,'double') & labels==-1 % KPM + lb = num2strcell(1:N*K); +else + lb = {}; + for i=1:K, + labels1 = labels(:); + if i==mid, str = ''; else str = sprintf('%+d',i-mid); end; + for i=1:N, + labels1{i} = [labels1{i} '(t' str ')']; + end; + lb = [lb; labels1(:)]; + end; +end + +dag = zeros(N*K); + +for i=1:K, + xi = xi+1; + x = [x; xi]; + + idx = ((i-1)*N+1):i*N; + dag(idx,idx) = adj; + if i .8; +% Adj2 = triu(Adj,1)+ triu(Adj,1)' + diag(zeros(size,1)); +% draw_dot(Adj2) + +% Original: Leon Peshkin +% Modified by Tom Minka + +% minka +N = size(adj,1); +unique_labels = cellstr(num2str((1:N)','%-1d')); +labels = unique_labels; +isbox = zeros(N,1); +rotate_flag = 1; +tolerance = 0.001; +options = ''; +for i = 1:2:length(varargin) + switch varargin{i} + case 'node_label', labels = varargin{i+1}; + % replace with unique labels + varargin{i+1} = unique_labels; + case 'isbox', isbox = varargin{i+1}; + case 'rotate', rotate_flag = varargin{i+1}; + case 'tolerance', tolerance = varargin{i+1}; + case 'start', start = varargin{i+1}; + options = [options ' -Gstart=' num2str(start)]; + case 'options', options = [options ' ' varargin{i+1}]; + end +end + +if ispc, shell = 'dos'; else, shell = 'unix'; end % Which OS ? 
+ +cmdline = strcat(shell,'(''neato -V'')'); +status = eval(cmdline); +%[status, result] = dos('neato -V'); % request version to check NEATO +if status == 1, fprintf('Complaining \n'); exit, end + +tmpDOTfile = '_GtDout.dot'; % to be platform independant no use of directories +tmpLAYOUT = '_LAYout.dot'; +graph_to_dot(adj > 0, 'filename', tmpDOTfile, 'node_label', unique_labels, varargin{:}); % save in file + +cmdline = strcat([shell '(''neato -Tdot ' tmpDOTfile options ' -o ' tmpLAYOUT ''')']); % preserve trailing spaces +status = eval(cmdline); % get NEATO todo layout + +[adj, permuted_labels, x, y] = dot_to_graph(tmpLAYOUT); % load layout +delete(tmpLAYOUT); delete(tmpDOTfile); % clean up temporary files + +% permute the original arguments to match permuted_labels. +order = []; +for i = 1:length(permuted_labels) + j = strmatch(permuted_labels{i},unique_labels,'exact'); + order(i) = j(1); +end +labels = labels(order); +isbox = isbox(order); +if rotate_flag + [x,y] = best_rotation(x,y,tolerance); +end + +figure(1); clf; axis square % now plot +[x, y, h] = draw_graph(adj>0, labels, isbox, x, y, varargin{:}); + + +function [x,y] = best_rotation(x,y,h) +% Rotate the points to maximize the horizontal and vertical alignment. +% Written by Tom Minka. + +xm = mean(x); +ym = mean(y); +xr = max(x)-min(x); +yr = max(y)-min(y); +x = (x-xm)/xr; +y = (y-ym)/yr; + +xy = [x(:) y(:)]; +if 1 + angle = fminbnd(@rotation_cost,-pi/4,pi/4,[],xy,h); +else + angles = linspace(-pi/4,pi/4,40); + e = []; + for i = 1:length(angles) + e(i) = rotation_cost(angles(i),xy,h); + end + %figure(2) + %plot(angles*180/pi,e) + angle = angles(argmin(e)); +end +%angle*180/pi +c = cos(angle); s = sin(angle); +xy = xy*[c s; -s c]; + +x = xy(:,1)*xr+xm; +y = xy(:,2)*yr+ym; + + +function e = rotation_cost(angle,xy,h) +% xy is 2-column matrix. +% e is small if many x's and y's are aligned. 
+ +c = cos(angle); s = sin(angle); +xy = xy*[c s; -s c]; +dx = sqdist(xy(:,1)',xy(:,1)'); +dy = sqdist(xy(:,2)',xy(:,2)'); +dx = setdiag(dx,Inf); +dy = setdiag(dy,Inf); +e = sum(exp(-dx(:)/h))+sum(exp(-dy(:)/h)); +e = -e; diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/GraphViz/draw_graph.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/GraphViz/draw_graph.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,332 @@ +function [x, y, h] = draw_graph(adj, labels, node_t, x, y, varargin) +% DRAW_LAYOUT Draws a layout for a graph +% +% [X, Y, H] = DRAW_LAYOUT(ADJ, ) +% +% Inputs : +% ADJ : Adjacency matrix (source, sink) +% LABELS : Cell array containing labels +% ISBOX : 1 if node is a box, 0 if oval +% X, Y, : Coordinates of nodes on the unit square +% +% Outputs : +% X, Y : Coordinates of nodes on the unit square +% H : Object handles +% +% Usage Example : [x, y] = draw_layout([0 1;0 0], {'Hidden','Visible'}, [1 0]'); +% +% h(i,1) is the text handle - color +% h(i,2) is the circle handle - facecolor +% +% See also MAKE_LAYOUT + +% Change History : +% Date Time Prog Note +% 13-Apr-2000 9:06 PM ATC Created under MATLAB 5.3.1.29215a (R11.1) +% +% ATC = Ali Taylan Cemgil, +% SNN - University of Nijmegen, Department of Medical Physics and Biophysics +% e-mail : cemgil@mbfys.kun.nl + +adj = double(adj); +N = size(adj,1); +if nargin<2, + labels = cellstr(int2str((1:N)')); +end + +if nargin<3, + node_t = zeros(N,1); +else + node_t = node_t(:); +end; + +axis([0 1 0 1]); +set(gca,'XTick',[],'YTick',[],'box','on'); +% axis('square'); +%colormap(flipud(gray)); + +if nargin<4, + [x y] = make_layout(adj); +end; + +idx1 = find(node_t==0); h1 = []; wd1=[]; +if ~isempty(idx1) + [h1 wd1] = textoval(x(idx1), y(idx1), labels(idx1), varargin{:}); +end; + +idx2 = find(node_t~=0); h2 = []; wd2 = []; +if ~isempty(idx2) + [h2 wd2] = textbox(x(idx2), y(idx2), labels(idx2), varargin{:}); +end; + +wd = zeros(size(wd1,1)+size(wd2,1),2); +if ~isempty(idx1), wd(idx1, :) = wd1; end; +if ~isempty(idx2), wd(idx2, :) = wd2; end; + +% bug: this code assumes [x y] is the center of each box and oval, which +% isn't exactly true. +h_edge = []; +for i=1:N, + j = find(adj(i,:)==1); + for k=j, + if x(k)-x(i)==0, + sign = 1; + if y(i)>y(k), alpha = -pi/2; else alpha = pi/2; end; + else + alpha = atan((y(k)-y(i))/(x(k)-x(i))); + if x(i)2, + h = zeros(length(wd),2); + if ~isempty(idx1), + h(idx1,:) = h1; + end; + if ~isempty(idx2), + h(idx2,:) = h2; + end; +end; + +%%%%% + +function [t, wd] = textoval(x, y, str, varargin) +% TEXTOVAL Draws an oval around text objects +% +% [T, WIDTH] = TEXTOVAL(X, Y, STR) +% [..] = TEXTOVAL(STR) % Interactive +% +% Inputs : +% X, Y : Coordinates +% TXT : Strings +% +% Outputs : +% T : Object Handles +% WIDTH : x and y Width of ovals +% +% Usage Example : [t] = textoval('Visit to Asia?'); +% +% +% Note : +% See also TEXTBOX + +% Uses : + +% Change History : +% Date Time Prog Note +% 15-Jun-1998 10:36 AM ATC Created under MATLAB 5.1.0.421 +% 12-Mar-2004 10:00 AM minka Changed placement/sizing. 
+% +% ATC = Ali Taylan Cemgil, +% SNN - University of Nijmegen, Department of Medical Physics and Biophysics +% e-mail : cemgil@mbfys.kun.nl + +temp = []; +textProperties = {'BackgroundColor','Color','FontAngle','FontName','FontSize','FontUnits','FontWeight','Rotation'}; +varargin = argfilter(varargin,textProperties); + +if nargin == 1 + str = x; +end +if ~isa(str,'cell') str=cellstr(str); end; +N = length(str); +wd = zeros(N,2); +for i=1:N, + if nargin == 1 + [x, y] = ginput(1); + end + tx = text(x(i),y(i),str{i},'HorizontalAlignment','center',varargin{:}); + % minka + [ptc wx wy] = draw_oval(tx); + wd(i,:) = [wx wy]; + % draw_oval will paint over the text, so need to redraw it + delete(tx); + tx = text(x(i),y(i),str{i},'HorizontalAlignment','center',varargin{:}); + temp = [temp; tx ptc]; +end +if nargout>0, t = temp; end; + +%%%%%%%%% + + +function [ptc, wx, wy] = draw_oval(tx, x, y) +% Draws an oval box around a tex object +sz = get(tx,'Extent'); +% minka +wy = 2/3*sz(4); +wx = 2/3*sz(3); +x = sz(1)+sz(3)/2; +y = sz(2)+sz(4)/2; +ptc = ellipse(x, y, wx, wy); +set(ptc, 'FaceColor','w'); + + +%%%%%%%%%%%%% + +function [p] = ellipse(x, y, rx, ry, c) +% ELLIPSE Draws Ellipse shaped patch objects +% +% [

] = ELLIPSE(X, Y, Rx, Ry, C) +% +% Inputs : +% X : N x 1 vector of x coordinates +% Y : N x 1 vector of y coordinates +% Rx, Ry : Radii +% C : Color index +% +% +% Outputs : +% P = Handles of Ellipse shaped path objects +% +% Usage Example : [] = ellipse(); +% +% +% Note : +% See also + +% Uses : + +% Change History : +% Date Time Prog Note +% 27-May-1998 9:55 AM ATC Created under MATLAB 5.1.0.421 + +% ATC = Ali Taylan Cemgil, +% SNN - University of Nijmegen, Department of Medical Physics and Biophysics +% e-mail : cemgil@mbfys.kun.nl + +if (nargin < 2) error('Usage Example : e = ellipse([0 1],[0 -1],[1 0.5],[2 0.5]); '); end; +if (nargin < 3) rx = 0.1; end; +if (nargin < 4) ry = rx; end; +if (nargin < 5) c = 1; end; + +if length(c)==1, c = ones(size(x)).*c; end; +if length(rx)==1, rx = ones(size(x)).*rx; end; +if length(ry)==1, ry = ones(size(x)).*ry; end; + +n = length(x); +p = zeros(size(x)); +t = 0:pi/30:2*pi; +for i=1:n, + px = rx(i)*cos(t)+x(i); + py = ry(i)*sin(t)+y(i); + p(i) = patch(px,py,c(i)); +end; + +if nargout>0, pp = p; end; + +%%%%% + +function [t, wd] = textbox(x,y,str,varargin) +% TEXTBOX Draws A Box around the text +% +% [T, WIDTH] = TEXTBOX(X, Y, STR) +% [..] = TEXTBOX(STR) +% +% Inputs : +% X, Y : Coordinates +% TXT : Strings +% +% Outputs : +% T : Object Handles +% WIDTH : x and y Width of boxes +%% +% Usage Example : t = textbox({'Ali','Veli','49','50'}); +% +% +% Note : +% See also TEXTOVAL + +% Uses : + +% Change History : +% Date Time Prog Note +% 09-Jun-1998 11:43 AM ATC Created under MATLAB 5.1.0.421 +% 12-Mar-2004 10:00 AM minka Changed placement/sizing. +% +% ATC = Ali Taylan Cemgil, +% SNN - University of Nijmegen, Department of Medical Physics and Biophysics +% e-mail : cemgil@mbfys.kun.nl + +temp = []; +textProperties = {'BackgroundColor','Color','FontAngle','FontName','FontSize','FontUnits','FontWeight','Rotation'}; +varargin = argfilter(varargin,textProperties); + +if nargin == 1 + str = x; +end +if ~isa(str,'cell') str=cellstr(str); end; +N = length(str); +wd = zeros(N,2); +for i=1:N, + if nargin == 1 + [x, y] = ginput(1); + end + tx = text(x(i),y(i),str{i},'HorizontalAlignment','center',varargin{:}); + % minka + [ptc wx wy] = draw_box(tx); + wd(i,:) = [wx wy]; + % draw_box will paint over the text, so need to redraw it + delete(tx); + tx = text(x(i),y(i),str{i},'HorizontalAlignment','center',varargin{:}); + temp = [temp; tx ptc]; +end; + +if nargout>0, t = temp; end; + + +function [ptc, wx, wy] = draw_box(tx) +% Draws a box around a text object +sz = get(tx,'Extent'); +% minka +wy = 1/2*sz(4); +wx = 1/2*sz(3); +x = sz(1)+sz(3)/2; +y = sz(2)+sz(4)/2; +ptc = patch([x-wx x+wx x+wx x-wx], [y+wy y+wy y-wy y-wy],'w'); +set(ptc, 'FaceColor','w'); + + + +function args = argfilter(args,keep) +%ARGFILTER Remove unwanted arguments. +% ARGFILTER(ARGS,KEEP), where ARGS = {'arg1',value1,'arg2',value2,...}, +% returns a new argument list where only the arguments named in KEEP are +% retained. KEEP is a character array or cell array of strings. 
+ +% Written by Tom Minka + +if ischar(keep) + keep = cellstr(keep); +end +i = 1; +while i < length(args) + if ~ismember(args{i},keep) + args = args(setdiff(1:length(args),[i i+1])); + else + i = i + 2; + end +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/GraphViz/draw_graph_test.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/GraphViz/draw_graph_test.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,50 @@ +% TEST_LAYOUT Script to test some bayesian net layouts +% + +% Change History : +% Date Time Prog Note +% 13-Apr-2000 10:40 PM ATC Created under MATLAB 5.3.1.29215a (R11.1) + +% ATC = Ali Taylan Cemgil, +% SNN - University of Nijmegen, Department of Medical Physics and Biophysics +% e-mail : cemgil@mbfys.kun.nl + +%bnet = mk_asia_bnet; +%draw_graph(bnet.dag); + +% Make the following network (from Jensen (1996) p84 fig 4.17) +% 1 +% / | \ +% 2 3 4 +% | | | +% 5 6 7 +% \/ \/ +% 8 9 +% where all arcs point downwards + +disp('plot directed') +clf; + +N = 9; +dag = zeros(N,N); +dag(1,2)=1; dag(1,3)=1; dag(1,4)=1; +dag(2,5)=1; dag(3,6)=1; dag(4,7)=1; +dag(5,8)=1; dag(6,8)=1; dag(6,9)=1; dag(7,9) = 1; + +draw_graph(dag); + +pause +clf +disp('plot undirected') +udag = [dag+dag']; +draw_graph(udag); + +pause +clf +disp('plot mixed') +mg = [dag]; +mg(2,1) = 1; mg(8,5) = 1; +draw_graph(mg); + + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/GraphViz/draw_hmm.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/GraphViz/draw_hmm.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,117 @@ +function draw_hmm(A, varargin) +% DRAW_HMM Make a picture of the HMM using dotty +% function draw_hmm(A, ...) +% +% For details on dotty, see http://www.research.att.com/sw/tools/graphviz +% +% If A(i,j) > thresh, we draw and arc from state i to state j. +% +% Optional arguments (name/value pairs) [default] +% +% thresh - [1e-1] +% obsprob - If B(i,o) > 0, we include "o" in the name of state i. +% e.g., if state 5 emits 1,3,7, its label becomes "5: 1 3 7". +% startprob - ifstartprob(i) > 0, the state name will be prefixed with "+". +% endprob - if endprob(i) > 0, the state name will be appended with "-". +% filename - if [], we write to 'tmp.dot', convert this to 'tmp.ps' +% using 'dot -Tps tmp.dot -o tmp.ps', and then call ghostview to display the result. +% dot and gv must be on your system path. +% If filename ~= [], we just generate the dot file, and do not +% convert it to postscript or call ghostview. + +[thresh, B, startprob, endprob, filename] = ... + process_options(varargin, 'thresh', 1e-1, 'obsprob', [], 'startprob', [], 'endprob', [], ... 
+ 'filename', []); + +Q = length(A); + +arclabel = cell(Q,Q); +G = zeros(Q,Q); +for i=1:Q + for j=1:Q + if A(i,j) < thresh + arclabel{i,j} = ''; + else + G(i,j) = 1; + arclabel{i,j} = sprintf('%5.3f', A(i,j)); + end + end +end + + +nodelabel = cell(1,Q); +for i=1:Q + % annotate start/stop states + if ~isempty(startprob) & ~approxeq(startprob(i), 0) + start = '+'; + else + start = ''; + end + if ~isempty(endprob) & ~approxeq(hmm.endprob(i), 0) + stop = '-'; + else + stop = ''; + end + label = sprintf('%s%d%s :', start, i, stop); + + if ~isempty(B) + output_label = mk_output_label(B); + label = strcat(label, output_label); + end + + nodelabel{i} = label; +end + + +if isempty(filename) + filename = 'tmp.dot'; + %mkdot(G, filename, arclabel, nodelabel) + graph_to_dot(G, 'filename', filename, 'arc_label', arclabel, 'node_label', nodelabel); + fprintf('converting from .ps to .dot\n') + !dot -Tps tmp.dot -o tmp.ps + !gv tmp.ps & +else + graph_to_dot(G, 'filename', filename, 'arc_label', arclabel, 'node_label', nodelabel); + %mkdot(G, filename, arclabel, nodelabel) +end + + +%%%%%%%%% + +function label = mk_output_label(B) + +[Q O] = size(B); +label = ''; + +if 0 + % print most probable symbols + for i=1:Q + m = max(B(i,:)); + ndx = find(abs(B(i,:) - repmat(m,1,O)) < 1e-2); + %ndx = find(B(i,:)==m); + %label = sprintf('%d,', ndx); + end +end + +if 0 + % print prob distrib over all symbols + for o=1:O + if approxeq(B(i,o), 0) + % + else + label = strcat(label, sprintf('%d(%3.2f),', o, B(i,o))); + end + end +end + +if 1 + % print all non-zero symbols + chars = ['a' 'b' 'c']; + for o=1:O + if approxeq(B(i,o), 0) + % + else + label = strcat(label, sprintf('%s', chars(o))); + end + end +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/GraphViz/editGraphGUI.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/GraphViz/editGraphGUI.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,16 @@ +function g +%here is how one creates a function ("callback") which does something +%(prints the node label) when you click on the node's text in Matlab figure. +% +% Leon Peshkin http://www.ai.mit.edu/~pesha +% +%draw_graph(...) + + % "gca" is the current "axes" object, parent of all objects in figure + % "gcbo" is the handle of the object whose callback is being executed + % "findall" gives handles to all elements of a given type in the figure +text_elms = findall(gca,'Type','text'); +for ndx = 1:length(text_elms) + callbk = 'my_call(str2num(get(gcbo,''String'')))'; + set(text_elms(ndx), 'ButtonDownFcn', callbk); % assume the node label is a number +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/GraphViz/graph_to_dot.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/GraphViz/graph_to_dot.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,86 @@ +function graph_to_dot(adj, varargin) +%GRAPH_TO_DOT Makes a GraphViz (AT&T) file representing an adjacency matrix +% graph_to_dot(adj, ...) writes to the specified filename. 
+% +% Optional arguments can be passed as name/value pairs: [default] +% +% 'filename' - if omitted, writes to 'tmp.dot' +% 'arc_label' - arc_label{i,j} is a string attached to the i-j arc [""] +% 'node_label' - node_label{i} is a string attached to the node i ["i"] +% 'width' - width in inches [10] +% 'height' - height in inches [10] +% 'leftright' - 1 means layout left-to-right, 0 means top-to-bottom [0] +% 'directed' - 1 means use directed arcs, 0 means undirected [1] +% +% For details on graphviz, See http://www.research.att.com/sw/tools/graphviz +% +% See also dot_to_graph and draw_dot. + +% First version written by Kevin Murphy 2002. +% Modified by Leon Peshkin, Jan 2004. +% Bugfix by Tom Minka, Mar 2004. + +node_label = []; arc_label = []; % set default args +width = 10; height = 10; +leftright = 0; directed = 1; filename = 'tmp.dot'; + +for i = 1:2:nargin-1 % get optional args + switch varargin{i} + case 'filename', filename = varargin{i+1}; + case 'node_label', node_label = varargin{i+1}; + case 'arc_label', arc_label = varargin{i+1}; + case 'width', width = varargin{i+1}; + case 'height', height = varargin{i+1}; + case 'leftright', leftright = varargin{i+1}; + case 'directed', directed = varargin{i+1}; + end +end +% minka +if ~directed + adj = triu(adj | adj'); +end + +fid = fopen(filename, 'w'); +if directed + fprintf(fid, 'digraph G {\n'); + arctxt = '->'; + if isempty(arc_label) + labeltxt = ''; + else + labeltxt = '[label="%s"]'; + end +else + fprintf(fid, 'graph G {\n'); + arctxt = '--'; + if isempty(arc_label) + labeltxt = '[dir=none]'; + else + labeltext = '[label="%s",dir=none]'; + end +end +edgeformat = strcat(['%d ',arctxt,' %d ',labeltxt,';\n']); +fprintf(fid, 'center = 1;\n'); +fprintf(fid, 'size=\"%d,%d\";\n', width, height); +if leftright + fprintf(fid, 'rankdir=LR;\n'); +end +Nnds = length(adj); +for node = 1:Nnds % process nodes + if isempty(node_label) + fprintf(fid, '%d;\n', node); + else + fprintf(fid, '%d [ label = "%s" ];\n', node, node_label{node}); + end +end +for node1 = 1:Nnds % process edges + arcs = find(adj(node1,:)); % children(adj, node); + for node2 = arcs + if ~isempty(arc_label) + fprintf(fid, edgeformat,node1,node2,arc_label{node1,node2}); + else + fprintf(fid, edgeformat, node1, node2); + end + end +end +fprintf(fid, '}'); +fclose(fid); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/GraphViz/make_layout.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/GraphViz/make_layout.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,170 @@ +function [x, y] = layout_dag(adj) +% MAKE_LAYOUT Creates a layout from an adjacency matrix +% +% [X, Y] = MAKE_LAYOUT(ADJ) +% +% Inputs : +% ADJ = adjacency matrix (source, sink) +% +% Outputs : +% X, Y : Positions of nodes +% +% Usage Example : [X, Y] = make_layout(adj); +% +% +% Note : Uses some very simple heuristics, so any other +% algorithm would create a nicer layout +% +% See also + +% Uses : + +% Change History : +% Date Time Prog Note +% 13-Apr-2000 8:25 PM ATC Created under MATLAB 5.3.1.29215a (R11.1) + +% ATC = Ali Taylan Cemgil, +% SNN - University of Nijmegen, Department of Medical Physics and Biophysics +% e-mail : cemgil@mbfys.kun.nl + +N = size(adj,1); +tps = toposort(adj); + +if ~isempty(tps), % is directed ? 
+ level = zeros(1,N); + for i=tps, + idx = find(adj(:,i)); + if ~isempty(idx), + l = max(level(idx)); + level(i)=l+1; + end; + end; +else + level = poset(adj,1)'-1; +end; + +y = (level+1)./(max(level)+2); +y = 1-y; +x = zeros(size(y)); +for i=0:max(level), + idx = find(level==i); + offset = (rem(i,2)-0.5)/10; + x(idx) = (1:length(idx))./(length(idx)+1)+offset; +end; + +%%%%%%% + +function [depth] = poset(adj, root) +% POSET Identify a partial ordering among the nodes of a graph +% +% [DEPTH] = POSET(ADJ,ROOT) +% +% Inputs : +% ADJ : Adjacency Matrix +% ROOT : Node to start with +% +% Outputs : +% DEPTH : Depth of the Node +% +% Usage Example : [depth] = poset(adj,12); +% +% +% Note : All Nodes must be connected +% See also + +% Uses : + +% Change History : +% Date Time Prog Note +% 17-Jun-1998 12:01 PM ATC Created under MATLAB 5.1.0.421 + +% ATC = Ali Taylan Cemgil, +% SNN - University of Nijmegen, Department of Medical Physics and Biophysics +% e-mail : cemgil@mbfys.kun.nl + +adj = adj+adj'; + +N = size(adj,1); +depth = zeros(N,1); +depth(root) = 1; +queue = root; + +while 1, + if isempty(queue), + if all(depth), break; + else + root = find(depth==0); + root = root(1); + depth(root) = 1; + queue = root; + end; + end; + r = queue(1); queue(1) = []; + idx = find(adj(r,:)); + idx2 = find(~depth(idx)); + idx = idx(idx2); + queue = [queue idx]; + depth(idx) = depth(r)+1; +end; + +%%%%%%%%% + +function [seq] = toposort(adj) +% TOPOSORT A Topological ordering of nodes in a directed graph +% +% [SEQ] = TOPOSORT(ADJ) +% +% Inputs : +% ADJ : Adjacency Matrix. +% ADJ(i,j)==1 ==> there exists a directed edge +% from i to j +% +% Outputs : +% SEQ : A topological ordered sequence of nodes. +% empty matrix if graph contains cycles. +% +% Usage Example : +% N=5; +% [l,u] = lu(rand(N)); +% adj = ~diag(ones(1,N)) & u>0.5; +% seq = toposort(adj); +% +% +% Note : +% See also + +% Uses : + +% Change History : +% Date Time Prog Note +% 18-May-1998 4:44 PM ATC Created under MATLAB 5.1.0.421 + +% ATC = Ali Taylan Cemgil, +% SNN - University of Nijmegen, Department of Medical Physics and Biophysics +% e-mail : cemgil@mbfys.kun.nl + +N = size(adj); +indeg = sum(adj,1); +outdeg = sum(adj,2); +seq = []; + +for i=1:N, + % Find nodes with indegree 0 + idx = find(indeg==0); + % If can't find than graph contains a cycle + if isempty(idx), + seq = []; + break; + end; + % Remove the node with the max number of connections + [dummy idx2] = max(outdeg(idx)); + indx = idx(idx2); + seq = [seq, indx]; + indeg(indx)=-1; + idx = find(adj(indx,:)); + indeg(idx) = indeg(idx)-1; +end; + + + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/GraphViz/my_call.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/GraphViz/my_call.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,3 @@ +function my_call(value) +fprintf('%d \n', value); % might check here whether this is a label at all + % since we get here by clicking on ANY text in figure diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/GraphViz/process_options.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/GraphViz/process_options.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,132 @@ +% PROCESS_OPTIONS - Processes options passed to a Matlab function. +% This function provides a simple means of +% parsing attribute-value options. Each option is +% named by a unique string and is given a default +% value. +% +% Usage: [var1, var2, ..., varn[, unused]] = ... +% process_options(args, ... 
+% str1, def1, str2, def2, ..., strn, defn) +% +% Arguments: +% args - a cell array of input arguments, such +% as that provided by VARARGIN. Its contents +% should alternate between strings and +% values. +% str1, ..., strn - Strings that are associated with a +% particular variable +% def1, ..., defn - Default values returned if no option +% is supplied +% +% Returns: +% var1, ..., varn - values to be assigned to variables +% unused - an optional cell array of those +% string-value pairs that were unused; +% if this is not supplied, then a +% warning will be issued for each +% option in args that lacked a match. +% +% Examples: +% +% Suppose we wish to define a Matlab function 'func' that has +% required parameters x and y, and optional arguments 'u' and 'v'. +% With the definition +% +% function y = func(x, y, varargin) +% +% [u, v] = process_options(varargin, 'u', 0, 'v', 1); +% +% calling func(0, 1, 'v', 2) will assign 0 to x, 1 to y, 0 to u, and 2 +% to v. The parameter names are insensitive to case; calling +% func(0, 1, 'V', 2) has the same effect. The function call +% +% func(0, 1, 'u', 5, 'z', 2); +% +% will result in u having the value 5 and v having value 1, but +% will issue a warning that the 'z' option has not been used. On +% the other hand, if func is defined as +% +% function y = func(x, y, varargin) +% +% [u, v, unused_args] = process_options(varargin, 'u', 0, 'v', 1); +% +% then the call func(0, 1, 'u', 5, 'z', 2) will yield no warning, +% and unused_args will have the value {'z', 2}. This behaviour is +% useful for functions with options that invoke other functions +% with options; all options can be passed to the outer function and +% its unprocessed arguments can be passed to the inner function. + +% Copyright (C) 2002 Mark A. Paskin +% +% This program is free software; you can redistribute it and/or modify +% it under the terms of the GNU General Public License as published by +% the Free Software Foundation; either version 2 of the License, or +% (at your option) any later version. +% +% This program is distributed in the hope that it will be useful, but +% WITHOUT ANY WARRANTY; without even the implied warranty of +% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +% General Public License for more details. +% +% You should have received a copy of the GNU General Public License +% along with this program; if not, write to the Free Software +% Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 +% USA. 
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +function [varargout] = process_options(args, varargin) + +% Check the number of input arguments +n = length(varargin); +if (mod(n, 2)) + error('Each option must be a string/value pair.'); +end + +% Check the number of supplied output arguments +if (nargout < (n / 2)) + error('Insufficient number of output arguments given'); +elseif (nargout == (n / 2)) + warn = 1; + nout = n / 2; +else + warn = 0; + nout = n / 2 + 1; +end + +% Set outputs to be defaults +varargout = cell(1, nout); +for i=2:2:n + varargout{i/2} = varargin{i}; +end + +% Now process all arguments +nunused = 0; +for i=1:2:length(args) + found = 0; + for j=1:2:n + if strcmpi(args{i}, varargin{j}) + varargout{(j + 1)/2} = args{i + 1}; + found = 1; + break; + end + end + if (~found) + if (warn) + warning(sprintf('Option ''%s'' not used.', args{i})); + args{i} + else + nunused = nunused + 1; + unused{2 * nunused - 1} = args{i}; + unused{2 * nunused} = args{i + 1}; + end + end +end + +% Assign the unused arguments +if (~warn) + if (nunused) + varargout{nout} = unused; + else + varargout{nout} = cell(0); + end +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/HMM/CVS/Entries --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/HMM/CVS/Entries Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,29 @@ +/README.txt/1.1.1.1/Thu Jun 9 01:22:48 2005// +/dhmm_em.m/1.1.1.1/Thu Jun 9 01:25:04 2005// +/dhmm_em_demo.m/1.1.1.1/Sun May 4 22:01:12 2003// +/dhmm_em_online.m/1.1.1.1/Sun May 4 22:02:58 2003// +/dhmm_em_online_demo.m/1.1.1.1/Sun May 4 22:04:10 2003// +/dhmm_logprob.m/1.1.1.1/Sun May 4 22:01:34 2003// +/dhmm_logprob_brute_force.m/1.1.1.1/Wed May 29 15:59:56 2002// +/dhmm_logprob_path.m/1.1.1.1/Wed May 29 15:59:56 2002// +/dhmm_sample.m/1.1.1.1/Mon May 31 22:19:50 2004// +/dhmm_sample_endstate.m/1.1.1.1/Sun May 4 22:00:34 2003// +/fixed_lag_smoother.m/1.1.1.1/Wed Jan 22 17:56:04 2003// +/fixed_lag_smoother_demo.m/1.1.1.1/Thu Jun 9 01:27:20 2005// +/fwdback.m/1.1.1.1/Thu Jun 9 01:17:50 2005// +/gausshmm_train_observed.m/1.1.1.1/Thu Feb 12 23:08:22 2004// +/mc_sample.m/1.1.1.1/Mon May 24 22:26:34 2004// +/mc_sample_endstate.m/1.1.1.1/Wed Jan 22 20:32:28 2003// +/mdp_sample.m/1.1.1.1/Wed May 29 15:59:56 2002// +/mhmmParzen_train_observed.m/1.1.1.1/Sat Feb 14 02:06:30 2004// +/mhmm_em.m/1.1.1.1/Sun Feb 8 04:52:42 2004// +/mhmm_em_demo.m/1.1.1.1/Tue May 13 16:11:22 2003// +/mhmm_logprob.m/1.1.1.1/Sun May 4 22:11:54 2003// +/mhmm_sample.m/1.1.1.1/Wed May 26 00:32:28 2004// +/mk_leftright_transmat.m/1.1.1.1/Wed May 29 15:59:58 2002// +/mk_rightleft_transmat.m/1.1.1.1/Fri Nov 22 21:45:52 2002// +/pomdp_sample.m/1.1.1.1/Sun May 4 21:58:20 2003// +/testHMM.m/1.1.1.1/Thu Jun 9 01:25:50 2005// +/transmat_train_observed.m/1.1.1.1/Sun Aug 29 12:41:52 2004// +/viterbi_path.m/1.1.1.1/Sat Oct 23 01:18:22 2004// +D diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/HMM/CVS/Repository --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/HMM/CVS/Repository Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +FullBNT/HMM diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/HMM/CVS/Root --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/HMM/CVS/Root Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/HMM/README.txt --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ 
b/toolboxes/FullBNT-1.0.7/HMM/README.txt Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,23 @@ +Hidden Markov Model (HMM) Toolbox written by Kevin Murphy (1998). +See http://www.ai.mit.edu/~murphyk/Software/hmm.html for details. + +Models +------ + +dhmm = HMM with discrete output +mhmm = HMM with mixture of Gaussians output; + Use mhmm with M=1 components to simulate an HMM with a single Gaussian output. + +Demos +----- + +mhmm_em_demo +dhmm_em_demo +dhmm_em_online_demo +fixed_lag_smoother_demo + +References +----------- + +See "A tutorial on Hidden Markov Models and selected applications in speech recognition", + L. Rabiner, 1989, Proc. IEEE 77(2):257--286. diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/HMM/dhmm_em.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/HMM/dhmm_em.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,124 @@ +function [LL, prior, transmat, obsmat, nrIterations] = ... + dhmm_em(data, prior, transmat, obsmat, varargin) +% LEARN_DHMM Find the ML/MAP parameters of an HMM with discrete outputs using EM. +% [ll_trace, prior, transmat, obsmat, iterNr] = learn_dhmm(data, prior0, transmat0, obsmat0, ...) +% +% Notation: Q(t) = hidden state, Y(t) = observation +% +% INPUTS: +% data{ex} or data(ex,:) if all sequences have the same length +% prior(i) +% transmat(i,j) +% obsmat(i,o) +% +% Optional parameters may be passed as 'param_name', param_value pairs. +% Parameter names are shown below; default values in [] - if none, argument is mandatory. +% +% 'max_iter' - max number of EM iterations [10] +% 'thresh' - convergence threshold [1e-4] +% 'verbose' - if 1, print out loglik at every iteration [1] +% 'obs_prior_weight' - weight to apply to uniform dirichlet prior on observation matrix [0] +% +% To clamp some of the parameters, so learning does not change them: +% 'adj_prior' - if 0, do not change prior [1] +% 'adj_trans' - if 0, do not change transmat [1] +% 'adj_obs' - if 0, do not change obsmat [1] +% +% Modified by Herbert Jaeger so xi are not computed individually +% but only their sum (over time) as xi_summed; this is the only way how they are used +% and it saves a lot of memory. + +[max_iter, thresh, verbose, obs_prior_weight, adj_prior, adj_trans, adj_obs] = ... + process_options(varargin, 'max_iter', 10, 'thresh', 1e-4, 'verbose', 1, ... + 'obs_prior_weight', 0, 'adj_prior', 1, 'adj_trans', 1, 'adj_obs', 1); + +previous_loglik = -inf; +loglik = 0; +converged = 0; +num_iter = 1; +LL = []; + +if ~iscell(data) + data = num2cell(data, 2); % each row gets its own cell +end + +while (num_iter <= max_iter) & ~converged + % E step + [loglik, exp_num_trans, exp_num_visits1, exp_num_emit] = ... + compute_ess_dhmm(prior, transmat, obsmat, data, obs_prior_weight); + + % M step + if adj_prior + prior = normalise(exp_num_visits1); + end + if adj_trans & ~isempty(exp_num_trans) + transmat = mk_stochastic(exp_num_trans); + end + if adj_obs + obsmat = mk_stochastic(exp_num_emit); + end + + if verbose, fprintf(1, 'iteration %d, loglik = %f\n', num_iter, loglik); end + num_iter = num_iter + 1; + converged = em_converged(loglik, previous_loglik, thresh); + previous_loglik = loglik; + LL = [LL loglik]; +end +nrIterations = num_iter - 1; + +%%%%%%%%%%%%%%%%%%%%%%% + +function [loglik, exp_num_trans, exp_num_visits1, exp_num_emit, exp_num_visitsT] = ... 
+ compute_ess_dhmm(startprob, transmat, obsmat, data, dirichlet) +% COMPUTE_ESS_DHMM Compute the Expected Sufficient Statistics for an HMM with discrete outputs +% function [loglik, exp_num_trans, exp_num_visits1, exp_num_emit, exp_num_visitsT] = ... +% compute_ess_dhmm(startprob, transmat, obsmat, data, dirichlet) +% +% INPUTS: +% startprob(i) +% transmat(i,j) +% obsmat(i,o) +% data{seq}(t) +% dirichlet - weighting term for uniform dirichlet prior on expected emissions +% +% OUTPUTS: +% exp_num_trans(i,j) = sum_l sum_{t=2}^T Pr(X(t-1) = i, X(t) = j| Obs(l)) +% exp_num_visits1(i) = sum_l Pr(X(1)=i | Obs(l)) +% exp_num_visitsT(i) = sum_l Pr(X(T)=i | Obs(l)) +% exp_num_emit(i,o) = sum_l sum_{t=1}^T Pr(X(t) = i, O(t)=o| Obs(l)) +% where Obs(l) = O_1 .. O_T for sequence l. + +numex = length(data); +[S O] = size(obsmat); +exp_num_trans = zeros(S,S); +exp_num_visits1 = zeros(S,1); +exp_num_visitsT = zeros(S,1); +exp_num_emit = dirichlet*ones(S,O); +loglik = 0; + +for ex=1:numex + obs = data{ex}; + T = length(obs); + %obslik = eval_pdf_cond_multinomial(obs, obsmat); + obslik = multinomial_prob(obs, obsmat); + [alpha, beta, gamma, current_ll, xi_summed] = fwdback(startprob, transmat, obslik); + + loglik = loglik + current_ll; + exp_num_trans = exp_num_trans + xi_summed; + exp_num_visits1 = exp_num_visits1 + gamma(:,1); + exp_num_visitsT = exp_num_visitsT + gamma(:,T); + % loop over whichever is shorter + if T < O + for t=1:T + o = obs(t); + exp_num_emit(:,o) = exp_num_emit(:,o) + gamma(:,t); + end + else + for o=1:O + ndx = find(obs==o); + if ~isempty(ndx) + exp_num_emit(:,o) = exp_num_emit(:,o) + sum(gamma(:, ndx), 2); + end + end + end +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/HMM/dhmm_em_demo.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/HMM/dhmm_em_demo.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,25 @@ +O = 3; +Q = 2; + +% "true" parameters +prior0 = normalise(rand(Q,1)); +transmat0 = mk_stochastic(rand(Q,Q)); +obsmat0 = mk_stochastic(rand(Q,O)); + +% training data +T = 5; +nex = 10; +data = dhmm_sample(prior0, transmat0, obsmat0, T, nex); + +% initial guess of parameters +prior1 = normalise(rand(Q,1)); +transmat1 = mk_stochastic(rand(Q,Q)); +obsmat1 = mk_stochastic(rand(Q,O)); + +% improve guess of parameters using EM +[LL, prior2, transmat2, obsmat2] = dhmm_em(data, prior1, transmat1, obsmat1, 'max_iter', 5); +LL + +% use model to compute log likelihood +loglik = dhmm_logprob(data, prior2, transmat2, obsmat2) +% log lik is slightly different than LL(end), since it is computed after the final M step diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/HMM/dhmm_em_online.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/HMM/dhmm_em_online.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,80 @@ +function [transmat, obsmat, exp_num_trans, exp_num_emit, gamma, ll] = dhmm_em_online(... + prior, transmat, obsmat, exp_num_trans, exp_num_emit, decay, data, ... + act, adj_trans, adj_obs, dirichlet, filter_only) +% ONLINE_EM Adjust the parameters using a weighted combination of the old and new expected statistics +% +% [transmat, obsmat, exp_num_trans, exp_num_emit, gamma, ll] = online_em(... +% prior, transmat, obsmat, exp_num_trans, exp_num_emit, decay, data, act, ... +% adj_trans, adj_obs, dirichlet, filter_only) +% +% 0 < decay < 1, with smaller values meaning the past is forgotten more quickly. +% (We need to decay the old ess, since they were based on out-of-date parameters.) 
+% The other params are as in learn_hmm. +% We do a single forwards-backwards pass on the provided data, initializing with the specified prior. +% (If filter_only = 1, we only do a forwards pass.) + +if ~exist('act'), act = []; end +if ~exist('adj_trans'), adj_trans = 1; end +if ~exist('adj_obs'), adj_obs = 1; end +if ~exist('dirichlet'), dirichlet = 0; end +if ~exist('filter_only'), filter_only = 0; end + +% E step +olikseq = multinomial_prob(data, obsmat); +if isempty(act) + [alpha, beta, gamma, ll, xi] = fwdback(prior, transmat, olikseq, 'fwd_only', filter_only); +else + [alpha, beta, gamma, ll, xi] = fwdback(prior, transmat, olikseq, 'fwd_only', filter_only, ... + 'act', act); +end + +% Increment ESS +[S O] = size(obsmat); +if adj_obs + exp_num_emit = decay*exp_num_emit + dirichlet*ones(S,O); + T = length(data); + if T < O + for t=1:T + o = data(t); + exp_num_emit(:,o) = exp_num_emit(:,o) + gamma(:,t); + end + else + for o=1:O + ndx = find(data==o); + if ~isempty(ndx) + exp_num_emit(:,o) = exp_num_emit(:,o) + sum(gamma(:, ndx), 2); + end + end + end +end + +if adj_trans & (T > 1) + if isempty(act) + exp_num_trans = decay*exp_num_trans + sum(xi,3); + else + % act(2) determines Q(2), xi(:,:,1) holds P(Q(1), Q(2)) + A = length(transmat); + for a=1:A + ndx = find(act(2:end)==a); + if ~isempty(ndx) + exp_num_trans{a} = decay*exp_num_trans{a} + sum(xi(:,:,ndx), 3); + end + end + end +end + + +% M step + +if adj_obs + obsmat = mk_stochastic(exp_num_emit); +end +if adj_trans & (T>1) + if isempty(act) + transmat = mk_stochastic(exp_num_trans); + else + for a=1:A + transmat{a} = mk_stochastic(exp_num_trans{a}); + end + end +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/HMM/dhmm_em_online_demo.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/HMM/dhmm_em_online_demo.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,93 @@ +% Example of online EM applied to a simple POMDP with fixed action seq + +clear all + +% Create a really easy model to learn +rand('state', 1); +O = 2; +S = 2; +A = 2; +prior0 = [1 0]'; +transmat0 = cell(1,A); +transmat0{1} = [0.9 0.1; 0.1 0.9]; % long runs of 1s and 2s +transmat0{2} = [0.1 0.9; 0.9 0.1]; % short runs +obsmat0 = eye(2); + +%prior0 = normalise(rand(S,1)); +%transmat0 = mk_stochastic(rand(S,S)); +%obsmat0 = mk_stochastic(rand(S,O)); + +T = 10; +act = [1*ones(1,25) 2*ones(1,25) 1*ones(1,25) 2*ones(1,25)]; +data = pomdp_sample(prior0, transmat0, obsmat0, act); +%data = sample_dhmm(prior0, transmat0, obsmat0, T, 1); + +% Initial guess of params +rand('state', 2); % different seed! +transmat1 = cell(1,A); +for a=1:A + transmat1{a} = mk_stochastic(rand(S,S)); +end +obsmat1 = mk_stochastic(rand(S,O)); +prior1 = prior0; % so it labels states the same way + +% Uniformative Dirichlet prior (expected sufficient statistics / pseudo counts) +e = 0.001; +ess_trans = cell(1,A); +for a=1:A + ess_trans{a} = repmat(e, S, S); +end +ess_emit = repmat(e, S, O); + +% Params +w = 2; +decay_sched = [0.1:0.1:0.9]; + +% Initialize +LL1 = zeros(1,T); +t = 1; +y = data(t); +data_win = y; +act_win = [1]; % arbitrary initial value +[prior1, LL1(1)] = normalise(prior1 .* obsmat1(:,y)); + +% Iterate +for t=2:T + y = data(t); + a = act(t); + if t <= w + data_win = [data_win y]; + act_win = [act_win a]; + else + data_win = [data_win(2:end) y]; + act_win = [act_win(2:end) a]; + prior1 = gamma(:, 2); + end + d = decay_sched(min(t, length(decay_sched))); + [transmat1, obsmat1, ess_trans, ess_emit, gamma, ll] = dhmm_em_online(... 
+ prior1, transmat1, obsmat1, ess_trans, ess_emit, d, data_win, act_win); + bel = gamma(:, end); + LL1(t) = ll/length(data_win); + %fprintf('t=%d, ll=%f\n', t, ll); +end + +LL1(1) = LL1(2); % since initial likelihood is for 1 slice +plot(1:T, LL1, 'rx-'); + + +% compare with offline learning + +if 0 +rand('state', 2); % same seed as online learner +transmat2 = cell(1,A); +for a=1:A + transmat2{a} = mk_stochastic(rand(S,S)); +end +obsmat2 = mk_stochastic(rand(S,O)); +prior2 = prior0; +[LL2, prior2, transmat2, obsmat2] = dhmm_em(data, prior2, transmat2, obsmat2, .... + 'max_iter', 10, 'thresh', 1e-3, 'verbose', 1, 'act', act); + +LL2 = LL2 / T + +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/HMM/dhmm_logprob.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/HMM/dhmm_logprob.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,22 @@ +function [loglik, errors] = dhmm_logprob(data, prior, transmat, obsmat) +% LOG_LIK_DHMM Compute the log-likelihood of a dataset using a discrete HMM +% [loglik, errors] = log_lik_dhmm(data, prior, transmat, obsmat) +% +% data{m} or data(m,:) is the m'th sequence +% errors is a list of the cases which received a loglik of -infinity + +if ~iscell(data) + data = num2cell(data, 2); +end +ncases = length(data); + +loglik = 0; +errors = []; +for m=1:ncases + obslik = multinomial_prob(data{m}, obsmat); + [alpha, beta, gamma, ll] = fwdback(prior, transmat, obslik, 'fwd_only', 1); + if ll==-inf + errors = [errors m]; + end + loglik = loglik + ll; +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/HMM/dhmm_logprob_brute_force.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/HMM/dhmm_logprob_brute_force.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,21 @@ +function logp = enumerate_HMM_loglik(prior, transmat, obsmat) +% ENUMERATE_HMM_LOGLIK Compute the log likelihood of a sequence by exhaustive (O(Q^T)) enumeration. +% logp = enumerate_HMM_loglik(prior, transmat, obsmat) +% +% Inputs: +% prior(i) = Pr(Q(1) = i) +% transmat(i,j) = Pr(Q(t+1)=j | Q(t)=i) +% obsmat(i,t) = Pr(y(t) | Q(t)=i) + +Q = length(prior); +T = size(obsmat, 2); +sizes = repmat(Q, 1, T); + +psum = 0; +for i=1:Q^T + qs = ind2subv(sizes, i); % make the state sequence + psum = psum + prob_path(prior, transmat, obsmat, qs); +end +logp = log(psum) + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/HMM/dhmm_logprob_path.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/HMM/dhmm_logprob_path.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,15 @@ +function [ll, p] = prob_path(prior, transmat, obsmat, qs) +% PROB_PATH Compute the prob. of a specific path (state sequence) through an HMM. +% [ll, p] = prob_path(prior, transmat, obsmat, states) +% +% ll = log prob path +% p(t) = Pr(O(t)) * Pr(Q(t) -> Q(t+1)) for 1<=t

% ASORT
% a pedestrian NUMERICAL SORTER of ALPHANUMERIC data
 
% - create some data
          d = {
%         strings with one valid alphanumeric number
%         sorted numerically
                  '-inf'
                  'x-3.2e4y'
                  'f-1.4'
                  '-.1'
                  '+ .1d-2'
                  '.1'
                  'f.1'
                  'f -+1.4'
                  'f.2'
                  'f.3'
                  'f.10'
                  'f.11'
                  '+inf'
                  ' -nan'
                  '+ nan'
                  'nan'
%         strings with many numbers or invalid/ambiguous numbers
%         sorted in ascii dictionary order
                  ' nan nan'
                  '+ .1e-.2'
                  '-1 2'
                  'Z12e12ez'
                  'inf -inf'
                  's.3TT.4'
                  'z12e12ez'
%         strings without numbers
%         sorted in ascii dictionary order
                  ' . .. '
                  '.'
                  '...'
                  '.b a.'
                  'a string'
                  'a. .b'
          };
%   ... and scramble it...
          rand('seed',10);
          d=d(randperm(numel(d)));
 
% - run ASORT with
%   verbose output:              <-v>
%   keep additional results:     <-d>
          o=asort(d,'-v','-d');
% - or
%         p=asort(char(d),'-v','-d');
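% - (reading of the listing below, not ASORT's own terminology; see the asort
%   help further down in this patch) the '-v' trace has four columns: the raw
%   INPUT, a plain ASCII SORT, the final NUM SORT order (numerical group first,
%   then the ascii-number and ascii-string groups), and NUM READ, the value
%   parsed from each string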
 
    'INPUT'       'ASCII SORT'    'NUM SORT'             'NUM READ'         
    '...'         ' -nan'         '--- NUMERICAL'        '--- NUMBERS'      
    '+ .1e-.2'    ' . .. '        '-inf'                 [             -Inf]
    '.1'          ' nan nan'      'x-3.2e4y'             [           -32000]
    '.b a.'       '+ .1d-2'       'f-1.4'                [             -1.4]
    '-inf'        '+ .1e-.2'      '-.1'                  [             -0.1]
    'f.1'         '+ nan'         '+ .1d-2'              [            0.001]
    ' -nan'       '+inf'          '.1'                   [              0.1]
    '-1 2'        '-.1'           'f.1'                  [                1]
    'nan'         '-1 2'          'f -+1.4'              [              1.4]
    'a string'    '-inf'          'f.2'                  [                2]
    'f.3'         '.'             'f.3'                  [                3]
    '+ .1d-2'     '...'           'f.10'                 [               10]
    'a. .b'       '.1'            'f.11'                 [               11]
    's.3TT.4'     '.b a.'         '+inf'                 [              Inf]
    '+inf'        'Z12e12ez'      ' -nan'                [              NaN]
    ' nan nan'    'a string'      '+ nan'                [              NaN]
    'f-1.4'       'a. .b'         'nan'                  [              NaN]
    'x-3.2e4y'    'f -+1.4'       '--- ASCII NUMBERS'    '--- ASCII NUMBERS'
    'inf -inf'    'f-1.4'         ' nan nan'             ' nan nan'         
    '+ nan'       'f.1'           '+ .1e-.2'             '+ .1e-.2'         
    'f.2'         'f.10'          '-1 2'                 '-1 2'             
    'f.11'        'f.11'          'Z12e12ez'             'Z12e12ez'         
    'Z12e12ez'    'f.2'           'inf -inf'             'inf -inf'         
    'z12e12ez'    'f.3'           's.3TT.4'              's.3TT.4'          
    'f -+1.4'     'inf -inf'      'z12e12ez'             'z12e12ez'         
    ' . .. '      'nan'           '--- ASCII STRINGS'    '--- ASCII STRINGS'
    'f.10'        's.3TT.4'       ' . .. '               ' . .. '           
    '.'           'x-3.2e4y'      '.'                    '.'                
    '-.1'         'z12e12ez'      '...'                  '...'              
    ' '           ' '             '.b a.'                '.b a.'            
    ' '           ' '             'a string'             'a string'         
    ' '           ' '             'a. .b'                'a. .b'            
 
% - show results
          o
o = 
          magic: 'ASORT'
            ver: '30-Mar-2005 11:57:07'
           time: '30-Mar-2005 11:57:17'
        runtime: 0.047
    input_class: 'cell'
    input_msize: [29 1]
    input_bytes: 2038
    strng_class: 'char'
    strng_msize: [29 8]
    strng_bytes: 464
            anr: {16x1 cell}
            snr: {7x1 cell}
            str: {6x1 cell}
              c: [29x12 char]
              t: [29x12 logical]
              n: [16x12 char]
              d: [16x1 double]
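% - (note, per the asort help further down in this patch) .anr holds the
%   numerically sorted alphanumeric strings, .snr the ascii-sorted strings
%   with several or ambiguous numbers, .str the ascii-sorted strings without
%   numbers; .c/.t/.n/.d are the extra parser tables kept by the '-d' option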
 
          o.anr
ans = 
    '-inf'
    'x-3.2e4y'
    'f-1.4'
    '-.1'
    '+ .1d-2'
    '.1'
    'f.1'
    'f -+1.4'
    'f.2'
    'f.3'
    'f.10'
    'f.11'
    '+inf'
    ' -nan'
    '+ nan'
    'nan'
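% - the gist of the numerical sort above, as a minimal sketch (this regexp
%   reader is an illustration only, NOT the parser inside ASORT, and it does
%   not reproduce every grouping decision, e.g. for 'Z12e12ez'): read the
%   single number out of each string and sort by it; strings where no single
%   number can be read keep NaN and fall to the end
          v=nan(size(d));
          for k=1:numel(d)
                  t=regexpi(d{k},'[-+]?(\d+\.?\d*|\.\d+)([ed][-+]?\d+)?|[-+]?inf|nan','match');
                  if numel(t)==1, v(k)=str2double(strrep(lower(t{1}),'d','e')); end
          end
          [vs,ix]=sort(v);          % NaN (unreadable) entries sort to the end
          dnum=d(ix);               % compare with o.anr above for the readable part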
 
% - run ASORT with no-space/template options
%   NOTE the impact of -w/-t order!
          s={'ff - 1','ff + 1','- 12'};
 
%   RAW
          o=asort(s,'-v');
 
    'INPUT'     'ASCII SORT'    'NUM SORT'             'NUM READ'         
    'ff - 1'    '- 12'          '--- NUMERICAL'        '--- NUMBERS'      
    'ff + 1'    'ff + 1'        'ff + 1'               [                1]
    '- 12'      'ff - 1'        'ff - 1'               [                1]
    ' '         ' '             '- 12'                 [               12]
    ' '         ' '             '--- ASCII NUMBERS'    '--- ASCII NUMBERS'
    ' '         ' '             '--- ASCII STRINGS'    '--- ASCII STRINGS'
 
%   remove SPACEs
          o=asort(s,'-v','-w');
 
    'INPUT'    'ASCII SORT'    'NUM SORT'             'NUM READ'         
    'ff-1'     '-12'           '--- NUMERICAL'        '--- NUMBERS'      
    'ff+1'     'ff+1'          '-12'                  [              -12]
    '-12'      'ff-1'          'ff-1'                 [               -1]
    ' '        ' '             'ff+1'                 [                1]
    ' '        ' '             '--- ASCII NUMBERS'    '--- ASCII NUMBERS'
    ' '        ' '             '--- ASCII STRINGS'    '--- ASCII STRINGS'
 
%   remove TEMPLATE(s)
          o=asort(s,'-v','-t',{'ff','1'});
 
    'INPUT'    'ASCII SORT'    'NUM SORT'             'NUM READ'         
    ' - '      ' + '           '--- NUMERICAL'        '--- NUMBERS'      
    ' + '      ' - '           '- 2'                  [                2]
    '- 2'      '- 2'           '--- ASCII NUMBERS'    '--- ASCII NUMBERS'
    ' '        ' '             '--- ASCII STRINGS'    '--- ASCII STRINGS'
    ' '        ' '             ' + '                  ' + '              
    ' '        ' '             ' - '                  ' - '              
 
%   remove TEMPLATE(s) then SPACEs
          o=asort(s,'-v','-t','1','-w');
 
    'INPUT'    'ASCII SORT'    'NUM SORT'             'NUM READ'         
    'ff-'      '-2'            '--- NUMERICAL'        '--- NUMBERS'      
    'ff+'      'ff+'           '-2'                   [               -2]
    '-2'       'ff-'           '--- ASCII NUMBERS'    '--- ASCII NUMBERS'
    ' '        ' '             '--- ASCII STRINGS'    '--- ASCII STRINGS'
    ' '        ' '             'ff+'                  'ff+'              
    ' '        ' '             'ff-'                  'ff-'              
 
%   remove SPACEs then TEMPLATE(s)
          o=asort(s,'-v','-w','-t','1');
 
    'INPUT'    'ASCII SORT'    'NUM SORT'             'NUM READ'         
    'ff- '     '- 2'           '--- NUMERICAL'        '--- NUMBERS'      
    'ff+ '     'ff+ '          '- 2'                  [                2]
    '- 2'      'ff- '          '--- ASCII NUMBERS'    '--- ASCII NUMBERS'
    ' '        ' '             '--- ASCII STRINGS'    '--- ASCII STRINGS'
    ' '        ' '             'ff+ '                 'ff+ '             
    ' '        ' '             'ff- '                 'ff- '             
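
% - (summary) '-t' blanks each template to one space and '-w' removes
%   spaces, processed in the order they appear on the command line, so
%         asort(s,'-v','-t','1','-w')    % template first, then spaces -> '-2'
%         asort(s,'-v','-w','-t','1')    % spaces first, then template -> '- 2'
%   read different numbers (-2 vs. 2), as the two tables above show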
 
+ + + +
+ + + + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/asdemo.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/asdemo.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,70 @@ +% ASORT +% a pedestrian NUMERICAL SORTER of ALPHANUMERIC data + +% - create some data + d = { +% strings with one valid alphanumeric number +% sorted numerically + '-inf' + 'x-3.2e4y' + 'f-1.4' + '-.1' + '+ .1d-2' + '.1' + 'f.1' + 'f -+1.4' + 'f.2' + 'f.3' + 'f.10' + 'f.11' + '+inf' + ' -nan' + '+ nan' + 'nan' +% strings with many numbers or invalid/ambiguous numbers +% sorted in ascii dictionary order + ' nan nan' + '+ .1e-.2' + '-1 2' + 'Z12e12ez' + 'inf -inf' + 's.3TT.4' + 'z12e12ez' +% strings without numbers +% sorted in ascii dictionary order + ' . .. ' + '.' + '...' + '.b a.' + 'a string' + 'a. .b' + }; +% ... and scramble it... + rand('seed',10); + d=d(randperm(numel(d))); + +% - run ASORT with +% verbose output: <-v> +% keep additional results: <-d> + o=asort(d,'-v','-d'); +% - or +% p=asort(char(d),'-v','-d'); + +% - show results + o + o.anr + +% - run ASORT with no-space/template options +% NOTE the impact of -w/-t order! + s={'ff - 1','ff + 1','- 12'}; +% RAW + o=asort(s,'-v'); +% remove SPACEs + o=asort(s,'-v','-w'); +% remove TEMPLATE(s) + o=asort(s,'-v','-t',{'ff','1'}); +% remove TEMPLATE(s) than SPACEs + o=asort(s,'-v','-t','1','-w'); +% remove SPACEs than TEMPLATE(s) + o=asort(s,'-v','-w','-t','1'); + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/asort.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/asort.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,376 @@ +%[ANR,SNR,STR] = ASORT(INP,'OPT',...); +% S = ASORT(INP,'OPT',...); +% to sort alphanumeric strings numerically if +% they contain one properly formatted number +% otherwise, ascii dictionary sorting is applied +% +% INP unsorted input: +% - a char array +% - a cell array of strings +% OPT options +% -s - sorting option +% '-s','ascend' [def] +% '-s','descend' +% -st - force output form S [def: nargout dependent] +% -t - replace matching template(s) with one space +% prior to sorting +% '-t','template' +% '-t',{'template1','template2',...} +% -w - remove space(s) prior to sorting +% +% NOTE -t/-w options are processed in the +% order that they appear in +% the command line +% +% -v - verbose output [def: quiet] +% -d - debug mode +% save additional output in S +% .c: lex parser input +% .t: lex parser table +% .n: lex parser output +% .d: numbers read from .n +% +% ANR numerically sorted alphanumeric strings [eg, 'f.-1.5e+2x.x'] +% - contain one number that can be read by +% | +% SNR ascii dict sorted alphanumeric strings +% http://www.mathworks.com/matlabcentral/fileexchange/loadFile.do?objectId=7212# +% +% - contain more than one number [eg, 'f.-1.5e +2.x'] +% - contain incomplete|ambiguous numbers [eg, 'f.-1.5e+2.x'] +% STR ascii dict sorted strings +% - contain no numbers [eg, 'a test'] +% +% S structure with fields +% .anr +% .srn +% .str + +% created: +% us 03-Mar-2002 +% modified: +% us 30-Mar-2005 11:57:07 / TMW R14.sp2 + +%-------------------------------------------------------------------------------- +function varargout=asort(inp,varargin) + +varargout(1:nargout)={[]}; +if ~nargin + help(mfilename); + return; +end + +% - common parameters/options +n=[]; +ds=[]; +anr={}; +snr={}; +str={}; +smod='ascend'; % sorting option +tmpl={}; % template(s) +sflg=false; % output mode: structure +tflg=false; % remove template(s) 
+dflg=false; % debug mode +vflg=false; % verbose output +wflg=false; % remove spaces + +if nargin > 1 + ix=find(strcmp('-s',varargin)); + if ~isempty(ix) && nargin > ix(end)+1 + smod=varargin{ix(end)+1}; + end + ix=find(strcmp('-t',varargin)); + if ~isempty(ix) && nargin > ix(end)+1 + tflg=ix(end); + tmpl=varargin{ix(end)+1}; + end + if find(strcmp('-d',varargin)); + dflg=true; + end + if find(strcmp('-st',varargin)); + sflg=true; + end + if find(strcmp('-v',varargin)); + vflg=true; + end + ix=find(strcmp('-w',varargin)); + if ~isempty(ix) + wflg=ix(end); + end +end +% spec numbers +ntmpl={ + ' inf ' + '+inf ' + '-inf ' + ' nan ' + '+nan ' + '-nan ' + }; +% spec chars +ctmpl={ + '.' % decimal point + 'd' % exponent + 'e' % exponent + }; + +if nargout <= 3 + varargout{1}=inp; +else + disp(sprintf('ASORT> too many output args [%-1d/%-1d]\n',nargout,3)); + help(mfilename); + return; +end +if isempty(inp) + disp(sprintf('ASORT> input is empty')); + return; +end + +ti=clock; +winp=whos('inp'); +switch winp.class + case 'cell' + if ~iscellstr(inp) + disp(sprintf('ASORT> cell is not an array of strings')); + return; + end + inp=inp(:); + [ins,inx]=sort(inp); + case 'char' + % [ins,inx]=sortrows(inp); + inp=cstr(inp); + otherwise + disp(sprintf('ASORT> does not sort input of class <%s>',winp.class)); + return; +end + +inp=inp(:); +inp=setinp(inp,tmpl,[tflg wflg]); +[ins,inx]=sort(inp); +if strcmp(smod,'descend') + ins=ins(end:-1:1,:); + inx=inx(end:-1:1); +end +ins=inp(inx); +c=lower(char(ins)); +wins=whos('c'); +[cr,cc]=size(c); + +% - LEXICAL PARSER +%-------------------------------------------------------------------------------- +% - extend input on either side for search +c=[' '*ones(cr,2) c ' '*ones(cr,2)]; + +% - search for valid alphanumeric items in strings +% numbers/signs +t=(c>='0'&c<='9'); +t=t|c=='-'; +t=t|c=='+'; +[tr,tc]=size(t); +% decimal points +% note: valid numbers with dec points must follow these templates +% nr.nr +% sign.nr +% nr. +% .nr +ix1= t(:,1:end-2) & ... + ~isletter(c(:,1:end-2)) & ... + c(:,2:end-1)=='.'; +t(:,2:end-1)=t(:,2:end-1)|ix1; +ix1= (t(:,3:end) & ... + (~isletter(c(:,3:end)) & ... + ~isletter(c(:,1:end-2))) | ... + (c(:,3:end)=='e' | ... + c(:,3:end)=='d')) & ... + c(:,2:end-1)=='.'; +t(:,2:end-1)=t(:,2:end-1)|ix1; +% t(:,3:end)=t(:,3:end)|ix1; +% signs +t(c=='-')=false; +t(c=='+')=false; +ix1= t(:,3:end) & ... + (c(:,2:end-1)=='-' | ... + c(:,2:end-1)=='+'); +t(:,2:end-1)=t(:,2:end-1)|ix1; +% exponents +ix1= t(:,1:end-2) & ... + (c(:,2:end-1)=='e' | ... + c(:,2:end-1)=='d'); +t(:,2:end-1)=t(:,2:end-1)|ix1; +% spec numbers +c=reshape(c.',1,[]); +t=t'; +ic=[]; +for j=1:numel(ntmpl) + ic=[ic,strfind(c,ntmpl{j})]; +end +ic=sort(ic); +for i=1:numel(ic) + ix=ic(i)+0:ic(i)+4; + t(ix)=true; +end +t=t'; +c=reshape(c.',[tc,tr]).'; +t(c==' ')=false; +%-------------------------------------------------------------------------------- + +% - only allow one number per string +il=~any(t,2); +ib=strfind(reshape(t.',1,[]),[0 1]); +if ~isempty(ib) + ixe=cell(3,1); + n=reshape(char(t.*c).',1,[]); + for i=1:numel(ctmpl) + id=strfind(n,ctmpl{i}); + if ~isempty(id) + [dum,dum,ixu{i},ixe{i}]=dupinx(id,tc); + end + end + in=false(tr,1); + im=in; + % must check for anomalous cases like <'.d'> + id=sort(... + [find(n>='0' & n<='9'),... + strfind(n,'inf'),... 
+ strfind(n,'nan')]); + % [ibu,ibd,ixbu,ixe{i+1}]=dupinx(id,tc); + [ibu,ibd,ixbu,ixbd]=dupinx(id,tc); + in(ixbu)=true; + in(ixbd)=true; + [ibu,ibd,ixbu,ixbd]=dupinx(ib,tc); + im(ixbu)=true; + in=in&im; + in([ixe{:}])=false; + il=~any(t,2); + ia=~(in|il); + + % - read valid strings + n=t(in,:).*c(in,:); + n(n==0)=' '; + n=char(n); + dn=strread(n.','%n'); + if numel(dn) ~= numel(find(in)) + %disp(sprintf('ASORT> unexpected fatal error reading input!')); + if nargout + s.c=c; + s.t=t; + s.n=n; + s.d=dn; + varargout{1}=s; + end + return; + end + + % - sort numbers + [ds,dx]=sort(dn,1,smod); + in=find(in); + anr=ins(in(dx)); + snr=ins(ia); +end +str=ins(il); +to=clock; + +% - prepare output +if nargout < 3 || sflg + s.magic='ASORT'; + s.ver='30-Mar-2005 11:57:07'; + s.time=datestr(clock); + s.runtime=etime(to,ti); + s.input_class=winp.class; + s.input_msize=winp.size; + s.input_bytes=winp.bytes; + s.strng_class=wins.class; + s.strng_msize=wins.size; + s.strng_bytes=wins.bytes; + s.anr=anr; + s.snr=snr; + s.str=str; + if dflg + s.c=c; + s.t=t; + s.n=n; + s.d=ds; + end + varargout{1}=s; +else + s={anr,snr,str}; + for i=1:nargout + varargout{i}=s{i}; + end +end + +if vflg + inp=cstr(inp); + an=[{'--- NUMERICAL'}; anr]; + as=[{'--- ASCII NUMBERS'}; snr]; + at=[{'--- ASCII STRINGS'}; str]; + nn=[{'--- NUMBERS'}; num2cell(ds)]; + ag={' ';' ';' '}; + u=[{'INPUT'}; inp;ag]; + v=[{'ASCII SORT'}; ins;ag]; + w=[{'NUM SORT'}; an;as;at]; + x=[{'NUM READ'}; nn;as;at]; + w=[u,v,w,x]; + disp(w); +end + +return; +%-------------------------------------------------------------------------------- +function c=cstr(s) +% - bottleneck waiting for a good replacement +% it consumes ~75% of 's processing time! + +c=s; +if ischar(s) + sr=size(s,1); + c=cell(sr,1); + for i=1:sr + c{i}=s(i,:); % no deblanking! + end +end +return; +%-------------------------------------------------------------------------------- +function [idu,idd,ixu,ixd]=dupinx(ix,nc) +% - check for more than one entry/row in a matrix of column size +% unique indices: idu / ixu +% duplicate indices: idd / ixd + +if isempty(ix) + idu=[]; + idd=[]; + ixu=[]; + ixd=[]; + return; +end +id=fix(ix/nc)+1; +idi=diff(id)~=0; +ide=[true idi]; +idb=[idi true]; +idu=idb & ide; +idd=idb==1 & ide==0; +ixu=id(idu); +ixd=id(idd); +return; +%-------------------------------------------------------------------------------- +function inp=setinp(inp,tmpl,flg) +% - remove space(s) and/or templates + +if isempty(inp) || ~any(flg) + return; +end + +for i=sort(flg) + switch i + case flg(1) + if ischar(tmpl) + tmpl={tmpl}; + end + for i=1:numel(tmpl) + inp=strrep(inp,tmpl{i},' '); + end + case flg(2) + inp=strrep(inp,' ',''); + end +end +return; +%-------------------------------------------------------------------------------- diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/assert.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/assert.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,10 @@ +function assert(pred, str) +% ASSERT Raise an error if the predicate is not true. +% assert(pred, string) + +if nargin<2, str = ''; end + +if ~pred + s = sprintf('assertion violated: %s', str); + error(s); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/assertBNT.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/assertBNT.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,10 @@ +function assertBNT(pred, str) +% ASSERT Raise an error if the predicate is not true. 
+% assert(pred, string) + +if nargin<2, str = ''; end + +if ~pred + s = sprintf('assertion violated: %s', str); + error(s); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/assignEdgeNums.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/assignEdgeNums.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,32 @@ +function [edge_id, nedges] = assignEdgeNums(adj_mat) +% give each edge a unique number +% we number (i,j) for j>i first, in row, column order. +% Then we number the reverse links + +nnodes = length(adj_mat); +edge_id = zeros(nnodes); +e = 1; +for i=1:nnodes + for j=i+1:nnodes + if adj_mat(i,j) + edge_id(i,j) = e; + e = e+1; + end + end +end + +nedges = e-1; +tmp = edge_id; +ndx = find(tmp); +tmp(ndx) = tmp(ndx)+nedges; +edge_id = edge_id + triu(tmp)'; + + +if 0 +ndx = find(adj_mat); +nedges = length(ndx); +nnodes = length(adj_mat); +edge_id = zeros(1, nnodes*nnodes); +edge_id(ndx) = 1:nedges; +edge_id = reshape(edge_id, nnodes, nnodes); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/assign_cols.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/assign_cols.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,36 @@ +function M = assign_cols(cols, vals, M) +% ASSIGN_COLS Assign values to columns of a matrix +% function M = assign_cols(M, cols, vals, M) +% +% Example: +% M = assign_cols(data, ones(1,N)) +% will construct a 1-of-K encoding of the data, where K=ncols=max(data) and N=nrows=length(data) +% +% Example: +% M = zeros(3,2); +% M = assign_cols([1 2 1], [10 20 30], M) +% is equivalent to +% M(1, 1) = 10 +% M(2, 2) = 20 +% M(3, 1) = 30 +% + +if nargin < 3 + nr = length(cols); + nc = max(cols); + M = zeros(nr, nc); +else + [nr nc] = size(M); +end + +if 0 +for r=1:nr + M(r, cols(r)) = vals(r); +end +end + +if 1 +rows = 1:nr; +ndx = subv2ind([nr nc], [rows(:) cols(:)]); +M(ndx) = vals; +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/axis_pct.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/axis_pct.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,70 @@ +function ax = axis_pct(pct) +% AXIS_PCT Set reasonable axis limits. +% AXIS_PCT(pct) sets axis limits to extend pct% beyond limits of plotted +% objects. Default is 5%. +% Works for linear or log scale. +% Unfortunately, the axes won't change when new points are plotted. 
+ +if nargin < 1 + pct = 0.05; +end +ax = [Inf -Inf Inf -Inf Inf -Inf]; + +% find bounding box of plotted objects +children = get(gca,'children'); +for child = children' + if strcmp(get(child,'type'),'text') + xyz = get(child,'position'); + % need to determine bounding box of the text + c([1 2]) = xyz(1); + c([3 4]) = xyz(2); + c([5 6]) = xyz(3); + else + x = get(child,'xdata'); + c(1) = min(x); + c(2) = max(x); + y = get(child,'ydata'); + c(3) = min(y); + c(4) = max(y); + z = get(child,'zdata'); + if isempty(z) + c([5 6]) = 0; + else + c(5) = min(z); + c(6) = max(z); + end + end + ax([1 3 5]) = min(ax([1 3 5]), c([1 3 5])); + ax([2 4 6]) = max(ax([2 4 6]), c([2 4 6])); +end +if strcmp(get(gca,'xscale'), 'log') + ax([1 2]) = log(ax([1 2])); +end +if strcmp(get(gca,'yscale'), 'log') + ax([3 4]) = log(ax([3 4])); +end +dx = ax(2)-ax(1); +if dx == 0 + dx = 1; +end +dy = ax(4)-ax(3); +if dy == 0 + dy = 1; +end +dz = ax(6)-ax(5); +if dz == 0 + dz = 1; +end +ax = ax + [-dx dx -dy dy -dz dz]*pct; +if strcmp(get(gca,'xscale'), 'log') + ax([1 2]) = exp(ax([1 2])); +end +if strcmp(get(gca,'yscale'), 'log') + ax([3 4]) = exp(ax([3 4])); +end +% clip for 2D +ax = ax(1:length(axis)); +axis(ax); +if nargout < 1 + clear ax +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/bipartiteMatchingDemo.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/bipartiteMatchingDemo.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,112 @@ +% Consider matching sources to detections + +% s1 d2 +% s2 d3 +% d1 + +%a = bipartiteMatchingHungarian([52;0.01]) + +% sources(:,i) = [x y] coords +sources = [0.1 0.7; 0.6 0.4]'; +detections = [0.2 0.2; 0.2 0.8; 0.7 0.1]'; +dst = sqdist(sources, detections); + +% a = [2 3] which means s1-d2, s2-d3 +a = bipartiteMatchingHungarian(dst); +a2 = bipartiteMatchingIntProg(dst); +assert(isequal(a(:),a2(:))) + + +figure(1); clf +bipartiteMatchingDemoPlot(sources, detections, a) + + + + +%%%% Flip roles of sources and detections + +%dst = dst'; +dst = sqdist(detections, sources); +% a = [0 1 2] which means d1-0, d2-s1, d3-s2 +a = bipartiteMatchingHungarian(dst); + +a2 = bipartiteMatchingIntProg(dst); +assert(isequal(a(:),a2(:))) + +figure(2); clf +bipartiteMatchingDemoPlot(detections, sources, a) % swapped args + + + + +%%%%%%%%%% Move s1 nearer to d1 +% d2 +% s2 d3 +% s1 d1 + +sources = [0.1 0.3; 0.6 0.4]'; +detections = [0.2 0.2; 0.2 0.8; 0.7 0.1]'; +dst = sqdist(sources, detections); + +% a = [2 3] which means s1-d2, s2-d3 +a = bipartiteMatchingHungarian(dst); +[a2, ass] = bipartiteMatchingIntProg(dst); +assert(isequal(a(:),a2(:))) + + +figure(3); clf +bipartiteMatchingDemoPlot(sources, detections, a) + + + +%%%%%%%%%% + +% Use random points + +% Generate 2D data from a mixture of 2 Gaussians (from netlab demgmm1) +randn('state', 0); rand('state', 0); +gmix = gmm(2, 2, 'spherical'); +ndat1 = 10; ndat2 = 10; ndata = ndat1+ndat2; +%gmix.centres = [0.3 0.3; 0.7 0.7]; +%gmix.covars = [0.01 0.01]; +gmix.centres = [0.5 0.5; 0.5 0.5]; +gmix.covars = [0.1 0.01]; +[x, label] = gmmsamp(gmix, ndata); + +ndx = find(label==1); +sources = x(ndx,:)'; +ndx = find(label==2); +detections = x(ndx,:)'; +dst = sqdist(sources, detections); + +[a, ass] = bipartiteMatchingIntProg(dst); +[a2] = bipartiteMatchingHungarian(dst); +assert(isequal(a(:), a2(:))) + +figure(4); clf +bipartiteMatchingDemoPlot(sources, detections, a) + +% only match 80% of points +p1 = size(sources, 2); +p2 = size(detections, 2); +nmatch = ceil(0.8*min(p1,p2)); +a2 = 
bipartiteMatchingIntProg(dst, nmatch); +figure(5); clf +bipartiteMatchingDemoPlot(sources, detections, a2) + + +%%% swap roles + +ndx = find(label==2); +sources = x(ndx,:)'; +ndx = find(label==1); +detections = x(ndx,:)'; +dst = sqdist(sources, detections); + +% only match 80% of points +p1 = size(sources, 2); +p2 = size(detections, 2); +nmatch = ceil(0.8*min(p1,p2)); +a2 = bipartiteMatchingIntProg(dst, nmatch); +figure(6); clf +bipartiteMatchingDemoPlot(sources, detections, a2) diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/bipartiteMatchingDemoPlot.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/bipartiteMatchingDemoPlot.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,24 @@ +function bipartiteMatchingDemoPlot(sources, detections, a) + +hold on +p1 = size(sources,2); +p2 = size(detections,2); +for i=1:p1 + h=text(sources(1,i), sources(2,i), sprintf('s%d', i)); + set(h, 'color', 'r'); +end +for i=1:p2 + h=text(detections(1,i), detections(2,i), sprintf('d%d', i)); + set(h, 'color', 'b'); +end + +if nargin < 3, return; end + +for i=1:p1 + j = a(i); + if j==0 % i not matched to anything + continue + end + line([sources(1,i) detections(1,j)], [sources(2,i) detections(2,j)]) +end +axis_pct; diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/bipartiteMatchingHungarian.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/bipartiteMatchingHungarian.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,90 @@ +% MATCH - Solves the weighted bipartite matching (or assignment) +% problem. +% +% Usage: a = match(C); +% +% Arguments: +% C - an m x n cost matrix; the sets are taken to be +% 1:m and 1:n; C(i, j) gives the cost of matching +% items i (of the first set) and j (of the second set) +% +% Returns: +% +% a - an m x 1 assignment vector, which gives the +% minimum cost assignment. a(i) is the index of +% the item of 1:n that was matched to item i of +% 1:m. If item i (of 1:m) was not matched to any +% item of 1:n, then a(i) is zero. + +% Copyright (C) 2002 Mark A. Paskin +% +% This program is free software; you can redistribute it and/or modify +% it under the terms of the GNU General Public License as published by +% the Free Software Foundation; either version 2 of the License, or +% (at your option) any later version. +% +% This program is distributed in the hope that it will be useful, but +% WITHOUT ANY WARRANTY; without even the implied warranty of +% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +% General Public License for more details. +% +% You should have received a copy of the GNU General Public License +% along with this program; if not, write to the Free Software +% Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 +% USA. +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +function [a] = optimalMatching(C) + +% Trivial cases: +[p, q] = size(C); +if (p == 0) + a = []; + return; +elseif (q == 0) + a = zeros(p, 1); + return; +end + + +if 0 +% First, reduce the problem by making easy optimal matches. If two +% elements agree that they are the best match, then match them up. +[x, a] = min(C, [], 2); +[y, b] = min(C, [], 1); +u = find(1:p ~= b(a(:))); +a(u) = 0; +v = find(1:q ~= a(b(:))'); +C = C(u, v); +if (isempty(C)) return; end +end + +% Get the (new) size of the two sets, u and v. 
+[m, n] = size(C); + +%mx = realmax; +mx = 2*max(C(:)); +mn = -2*min(C(:)); +% Pad the affinity matrix to be square +if (m < n) + C = [C; mx * ones(n - m, n)]; +elseif (n < m) + C = [C, mx * ones(m, m - n)]; +end + +% Run the Hungarian method. First replace infinite values by the +% largest (or smallest) finite values. +C(find(isinf(C) & (C > 0))) = mx; +C(find(isinf(C) & (C < 0))) = mn; +%fprintf('running hungarian\n'); +[b, cost] = hungarian(C'); + +% Extract only the real assignments +ap = b(1:m)'; +ap(find(ap > n)) = 0; + +a = ap; +%% Incorporate this sub-assignment into the complete assignment +% k = find(ap); +% a(u(k)) = v(ap(k)); + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/bipartiteMatchingIntProg.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/bipartiteMatchingIntProg.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,69 @@ +function [a,ass] = bipartiteMatchingIntProg(dst, nmatches) +% BIPARTITEMATCHINGINTPROG Use binary integer programming (linear objective) to solve for optimal linear assignment +% function a = bipartiteMatchingIntProg(dst) +% a(i) = best matching column for row i +% +% This gives the same result as bipartiteMatchingHungarian. +% +% function a = bibpartiteMatchingIntProg(dst, nmatches) +% only matches the specified number (must be <= min(size(dst))). +% This can be used to allow outliers in both source and target. +% +% For details, see Marciel & Costeira, "A global solution to sparse correspondence +% problems", PAMI 25(2), 2003 + +if nargin < 2, nmatches = []; end + +[p1 p2] = size(dst); +p1orig = p1; p2orig = p2; +dstorig = dst; + +if isempty(nmatches) % no outliers allowed (modulo size difference) + % ensure matrix is square + m = max(dst(:)); + if p1p2 + dst = [dst m*ones(p1, p1-p2)]; + end +end +[p1 p2] = size(dst); + + +c = dst(:); % vectorize cost matrix + +% row-sum: ensure each column sums to 1 +A2 = kron(eye(p2), ones(1,p1)); +b2 = ones(p2,1); + +% col-sum: ensure each row sums to 1 +A3 = kron(ones(1,p2), eye(p1)); +b3 = ones(p1,1); + +if isempty(nmatches) + % enforce doubly stochastic + A = [A2; A3]; + b = [b2; b3]; + Aineq = zeros(1, p1*p2); + bineq = 0; +else + nmatches = min([nmatches, p1, p2]); + Aineq = [A2; A3]; + bineq = [b2; b3]; % row and col sums <= 1 + A = ones(1,p1*p2); + b = nmatches; % total num matches = b (otherwise get degenerate soln) +end + + +ass = bintprog(c, Aineq, bineq, A, b); +ass = reshape(ass, p1, p2); + +a = zeros(1, p1orig); +for i=1:p1orig + ndx = find(ass(i,:)==1); + if ~isempty(ndx) & (ndx <= p2orig) + a(i) = ndx; + end +end + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/block.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/block.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,15 @@ +function sub = block(blocks, block_sizes) +% BLOCK Return a vector of subscripts corresponding to the specified blocks. +% sub = block(blocks, block_sizes) +% +% e.g., block([2 5], [2 1 2 1 2]) = [3 7 8]. 
+ +blocks = blocks(:)'; +block_sizes = block_sizes(:)'; +skip = [0 cumsum(block_sizes)]; +start = skip(blocks)+1; +fin = start + block_sizes(blocks) - 1; +sub = []; +for j=1:length(blocks) + sub = [sub start(j):fin(j)]; +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/cell2matPad.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/cell2matPad.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,22 @@ +function data2 = cell2matPad(data) +% data{f}(y,x,b) - each frame can have a different size (can can even be empty) +% data2(y,x,b,f) = zero padded version + +Nframes = length(data); +Nbands = -inf; +nr = -inf; nc = -inf; +for f=1:Nframes + if isempty(data{f}), continue; end + nr = max(nr, size(data{f},1)); + nc = max(nc, size(data{f},2)); + Nbands = max(Nbands, size(data{f},3)); +end +data2 = zeros(nr, nc, Nbands, Nframes); +for f=1:Nframes + if isempty(data{f}), continue; end + data2(1:size(data{f},1), 1:size(data{f},2), :, f) = data{f}; +end +if Nbands == 1 + data2 = squeeze(data2); % reshape(data2, [size(data2,1), size(data2,2), Nframes]); +end + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/cell2num.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/cell2num.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,57 @@ +function N = cell2num(C) +% CELL2NUM Convert a 2D cell array to a 2D numeric array +% N = cell2num(C) +% If the cells contain column vectors, they must have the same number of rows in each row of C. +% Each column will be concatenated. +% +% Example 1: +% C = num2cell(rand(2,2)) +% [0.4565] [0.8214] +% [0.0185] [0.4447] +% N = cell2num(C) +% 0.4565 0.8214 +% 0.0185 0.4447 +% +% Example 2: +% C = cell(2, 3); +% for i=1:2 +% for j=1:3 +% C{i,j} = rand(i, 1); +% end +% end +% C = +% [ 0.8998] [ 0.8216] [ 0.6449] +% [2x1 double] [2x1 double] [2x1 double] +% C{2,1} = +% 0.8180 +% 0.6602 +% N=cell2num(C) +% 0.8998 0.8216 0.6449 +% 0.8180 0.3420 0.3412 +% 0.6602 0.2897 0.5341 + + +% error('use cell2mat in matlab 7') + + +if isempty(C) + N = []; + return; +end + +if any(cellfun('isempty', C)) %any(isemptycell(C)) + error('can''t convert cell array with empty cells to matrix') +end + +[nrows ncols] = size(C); +%N = reshape(cat(1, C{:}), [nrows ncols]); % this only works if C only contains scalars +r = 0; +for i=1:nrows + r = r + size(C{i,1}, 1); +end +c = 0; +for j=1:ncols + c = c + size(C{1,j}, 2); +end +N = reshape(cat(1, C{:}), [r c]); + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/centeringMatrix.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/centeringMatrix.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,8 @@ +N = 3; +x = rand(N,2); % each row is a feature vector +m = mean(x,1); +xc = x-repmat(m, N, 1); + +C = eye(N) - (1/N)*ones(N,N); +xc2 = C*x; +assert(approxeq(xc, xc2)) diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/checkpsd.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/checkpsd.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,15 @@ +function s = checkpsd(s) + +if (any(isnan(s) | isinf(s) | ~isreal(s))) + warning('S contains complex numbers, Inf, or NaN'); +end +% Drop any negative eigenvalues. +[V, D] = eig(full(s)); +d = real(diag(D)); +if (any(d < 0)) + warning(sprintf(['S is not positive semidefinite (min. eig. =' ... 
+ ' %0.5g); projecting.'], min(d))); + d(find(d < 0)) = 0; + D = diag(d); + s = V * D * V'; +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/chi2inv.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/chi2inv.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,36 @@ +function x = chi2inv(p,v); +%CHI2INV Inverse of the chi-square cumulative distribution function (cdf). +% X = CHI2INV(P,V) returns the inverse of the chi-square cdf with V +% degrees of freedom at the values in P. The chi-square cdf with V +% degrees of freedom, is the gamma cdf with parameters V/2 and 2. +% +% The size of X is the common size of P and V. A scalar input +% functions as a constant matrix of the same size as the other input. + +% References: +% [1] M. Abramowitz and I. A. Stegun, "Handbook of Mathematical +% Functions", Government Printing Office, 1964, 26.4. +% [2] E. Kreyszig, "Introductory Mathematical Statistics", +% John Wiley, 1970, section 10.2 (page 144) + +% Copyright 1993-2002 The MathWorks, Inc. +% $Revision: 1.1.1.1 $ $Date: 2005/04/26 02:30:30 $ + +if nargin < 2, + error('Requires two input arguments.'); +end + +[errorcode p v] = distchck(2,p,v); + +if errorcode > 0 + error('Requires non-scalar arguments to match in size.'); +end + +% Call the gamma inverse function. +x = gaminv(p,v/2,2); + +% Return NaN if the degrees of freedom is not positive. +k = (v <= 0); +if any(k(:)) + x(k) = NaN; +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/choose.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/choose.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,5 @@ +function c = choose(n,k) +% CHOOSE The number of ways of choosing k things from n +% c = choose(n,k) + +c = factorial(n)/(factorial(k) * factorial(n-k)); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/collapse_mog.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/collapse_mog.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,22 @@ +function [new_mu, new_Sigma, new_Sigma2] = collapse_mog(mu, Sigma, coefs) +% COLLAPSE_MOG Collapse a mixture of Gaussians to a single Gaussian by moment matching +% [new_mu, new_Sigma] = collapse_mog(mu, Sigma, coefs) +% +% coefs(i) - weight of i'th mixture component +% mu(:,i), Sigma(:,:,i) - params of i'th mixture component + +% S = sum_c w_c (S_c + m_c m_c' + m m' - 2 m_c m') +% = sum_c w_c (S_c + m_c m_c') + m m' - 2 (sum_c m_c) m' +% = sum_c w_c (S_c + m_c m_c') - m m' + +new_mu = sum(mu * diag(coefs), 2); % weighted sum of columns + +n = length(new_mu); +new_Sigma = zeros(n,n); +new_Sigma2 = zeros(n,n); +for j=1:length(coefs) + m = mu(:,j) - new_mu; + new_Sigma = new_Sigma + coefs(j) * (Sigma(:,:,j) + m*m'); + new_Sigma2 = new_Sigma2 + coefs(j) * (Sigma(:,:,j) + mu(:,j)*mu(:,j)'); +end +%assert(approxeq(new_Sigma, new_Sigma2 - new_mu*new_mu')) diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/colmult.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/colmult.c Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,62 @@ +#include +#include "mex.h" + +/* +out = colop(M, v) + +Apply binary operator to a vector v and to each column of M in turn +to produce a matrix the same size as M. + +This is equivalent to + +out = zeros(size(M)); +for col=1:size(M,2) + out(:,col) = op(M(:,col), v); +end + +The code needs to be modified for each different operator 'op'. 
+eg op = '.*' + +In vectorized form: + +out = M .* repmat(v(:), 1, size(M,2)) + +(This function was formerly called repmat_and_mult.c) + +*/ + +/* M(i,j) = M(i + nrows*j) since Matlab uses Fortran layout. */ + + +#define INMAT(i,j) M[(i)+nrows*(j)] +#define OUTMAT(i,j) out[(i)+nrows*(j)] + +void mexFunction( + int nlhs, mxArray *plhs[], + int nrhs, const mxArray *prhs[] + ) +{ + double *out, *M, *v; + int nrows, ncols, r, c; + + /* read the input args */ + M = mxGetPr(prhs[0]); + nrows = mxGetM(prhs[0]); + ncols = mxGetN(prhs[0]); + + v = mxGetPr(prhs[1]); + + plhs[0] = mxCreateDoubleMatrix(nrows, ncols, mxREAL); + out = mxGetPr(plhs[0]); + + for (c=0; c < ncols; c++) { + for (r=0; r < nrows; r++) { + OUTMAT(r,c) = INMAT(r,c) * v[r]; + /* printf("r=%d, c=%d, M=%f, v=%f\n", r, c, INMAT(r,c), v[r]); */ + } + } + +} + + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/computeROC.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/computeROC.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,64 @@ +function [FPrate, TPrate, AUC, thresholds] = computeROC(confidence, testClass) +% function [FPrate, TPrate, AUC, thresholds] = computeROC(confidence, testClass) +% +% computeROC computes the data for an ROC curve based on a classifier's confidence output. +% It returns the false positive rate and the true positive rate along with +% the area under the ROC curve, and the list of thresholds. +% +% Inputs: +% - confidence(i) is proportional to the probability that +% testClass(i) is positive +% +% testClass = 0 => target absent +% testClass = 1 => target present +% +% Based on algorithms 2 and 4 from Tom Fawcett's paper "ROC Graphs: Notes and +% Practical Considerations for Data Mining Researchers" (2003) +% http://www.hpl.hp.com/techreports/2003/HPL-2003-4.pdf" +% +% Vlad Magdin, 21 Feb 2005 + +% break ties in scores +S = rand('state'); +rand('state',0); +confidence = confidence + rand(size(confidence))*10^(-10); +rand('state',S) +[thresholds order] = sort(confidence, 'descend'); +testClass = testClass(order); + +%%% -- calculate TP/FP rates and totals -- %%% +AUC = 0; +faCnt = 0; +tpCnt = 0; +falseAlarms = zeros(1,size(thresholds,2)); +detections = zeros(1,size(thresholds,2)); +fPrev = -inf; +faPrev = 0; +tpPrev = 0; + +P = max(size(find(testClass==1))); +N = max(size(find(testClass==0))); + +for i=1:length(thresholds) + if thresholds(i) ~= fPrev + falseAlarms(i) = faCnt; + detections(i) = tpCnt; + + AUC = AUC + polyarea([faPrev faPrev faCnt/N faCnt/N],[0 tpPrev tpCnt/P 0]); + + fPrev = thresholds(i); + faPrev = faCnt/N; + tpPrev = tpCnt/P; + end + + if testClass(i) == 1 + tpCnt = tpCnt + 1; + else + faCnt = faCnt + 1; + end +end + +AUC = AUC + polyarea([faPrev faPrev 1 1],[0 tpPrev 1 0]); + +FPrate = falseAlarms/N; +TPrate = detections/P; diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/compute_counts.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/compute_counts.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,17 @@ +function count = compute_counts(data, sz) +% COMPUTE_COUNTS Count the number of times each combination of discrete assignments occurs +% count = compute_counts(data, sz) +% +% data(i,t) is the value of variable i in case t +% sz(i) : values for variable i are assumed to be in [1:sz(i)] +% +% Example: to compute a transition matrix for an HMM from a sequence of labeled states: +% transmat = mk_stochastic(compute_counts([seq(1:end-1); seq(2:end)], [nstates nstates])); + 
+assert(length(sz) == size(data, 1)); +P = prod(sz); +indices = subv2ind(sz, data'); % each row of data' is a case +%count = histc(indices, 1:P); +count = hist(indices, 1:P); +count = myreshape(count, sz); + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/conf2mahal.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/conf2mahal.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,62 @@ +% CONF2MAHAL - Translates a confidence interval to a Mahalanobis +% distance. Consider a multivariate Gaussian +% distribution of the form +% +% p(x) = 1/sqrt((2 * pi)^d * det(C)) * exp((-1/2) * MD(x, m, inv(C))) +% +% where MD(x, m, P) is the Mahalanobis distance from x +% to m under P: +% +% MD(x, m, P) = (x - m) * P * (x - m)' +% +% A particular Mahalanobis distance k identifies an +% ellipsoid centered at the mean of the distribution. +% The confidence interval associated with this ellipsoid +% is the probability mass enclosed by it. Similarly, +% a particular confidence interval uniquely determines +% an ellipsoid with a fixed Mahalanobis distance. +% +% If X is an d dimensional Gaussian-distributed vector, +% then the Mahalanobis distance of X is distributed +% according to the Chi-squared distribution with d +% degrees of freedom. Thus, the Mahalanobis distance is +% determined by evaluating the inverse cumulative +% distribution function of the chi squared distribution +% up to the confidence value. +% +% Usage: +% +% m = conf2mahal(c, d); +% +% Inputs: +% +% c - the confidence interval +% d - the number of dimensions of the Gaussian distribution +% +% Outputs: +% +% m - the Mahalanobis radius of the ellipsoid enclosing the +% fraction c of the distribution's probability mass +% +% See also: MAHAL2CONF + +% Copyright (C) 2002 Mark A. Paskin +% +% This program is free software; you can redistribute it and/or modify +% it under the terms of the GNU General Public License as published by +% the Free Software Foundation; either version 2 of the License, or +% (at your option) any later version. +% +% This program is distributed in the hope that it will be useful, but +% WITHOUT ANY WARRANTY; without even the implied warranty of +% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +% General Public License for more details. +% +% You should have received a copy of the GNU General Public License +% along with this program; if not, write to the Free Software +% Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 +% USA. +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +function m = conf2mahal(c, d) + +m = chi2inv(c, d); \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/cross_entropy.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/cross_entropy.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,15 @@ +function kl = cross_entropy(p, q, symmetric) +% CROSS_ENTROPY Compute the Kullback-Leibler divergence between two discrete prob. distributions +% kl = cross_entropy(p, q, symmetric) +% +% If symmetric = 1, we compute the symmetric version. 
Default: symmetric = 0; + +tiny = exp(-700); +if nargin < 3, symmetric = 0; end +p = p(:); +q = q(:); +if symmetric + kl = (sum(p .* log((p+tiny)./(q+tiny))) + sum(q .* log((q+tiny)./(p+tiny))))/2; +else + kl = sum(p .* log((p+tiny)./(q+tiny))); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/dirKPM.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/dirKPM.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,98 @@ +function filenames = dirKPM(dirname, ext, varargin) +% dirKPM Like the built-in dir command, but returns filenames as a cell array instead of a struct +% +% filenames = dirKPM(dirname) +% returns all files, except '.' and '..' +% +% filenames = dirKPM('images', '*.jpg') +% returns files with this extension +% eg filenames{1} = 'foo.jpg' etc +% +% OPTIONAL ARGUMENTS [default in brackets] +% filenames = dirKPM('images', '', param1, val1, param2, val2, ...) +% +% 'fileType'='image' ['all'] means return files with extension .jpg, .png, .bmp +% +% 'prepend'=1 [0] means preprend folder name to filename +% eg filenames{1} = 'images/foo.jpg' +% +% 'doSort'=1 [1] means sort filenames in ascending alphanumerical order (where possible) +% +% 'doRecurse'=1 [0] recursive dir, apply the same dirKPM call on all +% subfolders (decrease MAXDEPTH option to prevent recursion from branching +% too explosively) + +if nargin < 1, dirname = '.'; end + +if nargin < 2, ext = ''; end + +[fileType, prepend, doSort, doRecurse, MAXDEPTH, DEPTH] = process_options(... + varargin, 'fileType', 'all', 'prepend', 0, 'doSort', 1, 'doRecurse', 0,... + 'MAXDEPTH', 3, 'DEPTH', 0); + +tmp = dir(fullfile(dirname, ext)); +[filenames I] = setdiff({tmp.name}, {'.', '..'}); +tmp = tmp(I); + +if doRecurse && sum([tmp.isdir])>0 && DEPTH0 + filenames(nfilenames+1:nfilenames+length(subDirFilenames)) = subDirFilenames; + end + end +end + +nfiles = length(filenames); +if nfiles==0 return; end + +switch fileType + case 'image', + for fi=1:nfiles + good(fi) = isImage(filenames{fi}); + end + filenames = filenames(find(good)); + case 'all', + % no-op + otherwise + error(sprintf('unrecognized file type %s', fileType)); +end + +if doSort +% % sort filenames alphanumerically (if possible) +% DJE, buggy, MUST save tmp.anr/snr/str or else we potentially lose +% filenames +% tmp = asort(filenames, '-s', 'ascend'); +% if ~isempty(tmp.anr) +% filenames = tmp.anr'; +% else +% filenames = tmp.str'; +% end + % if names could not be sorted, return original order + + filenames=sort(filenames); + +end + + +if prepend + nfiles = length(filenames); + for fi=1:nfiles + filenames{fi} = fullfile(dirname, filenames{fi}); + end +end + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/div.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/div.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,5 @@ +function d = div(a,b) +% DIV Integer division +% d = div(a,b) + +d = floor(a / b); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/draw_circle.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/draw_circle.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,28 @@ +function h = draw_circle(x, r, outline_color, fill_color) +% draw filled circles at centers x with radii r. +% x is a matrix of columns. r is a row vector. 
+ +n = 40; % resolution +radians = [0:(2*pi)/(n-1):2*pi]; +unitC = [sin(radians); cos(radians)]; + +% extend r if necessary +if length(r) < cols(x) + r = [r repmat(r(length(r)), 1, cols(x)-length(r))]; +end + +h = []; +% hold is needed for fill() +held = ishold; +hold on +for i=1:cols(x) + y = unitC*r(i) + repmat(x(:, i), 1, n); + if nargin < 4 + h = [h line(y(1,:), y(2,:), 'Color', outline_color)]; + else + h = [h fill(y(1,:), y(2,:), fill_color, 'EdgeColor', outline_color)]; + end +end +if ~held + hold off +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/draw_ellipse.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/draw_ellipse.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,24 @@ +function h = draw_ellipse(x, c, outline_color, fill_color) +% DRAW_ELLIPSE(x, c, outline_color, fill_color) +% Draws ellipses at centers x with covariance matrix c. +% x is a matrix of columns. c is a positive definite matrix. +% outline_color and fill_color are optional. + +n = 40; % resolution +radians = [0:(2*pi)/(n-1):2*pi]; +unitC = [sin(radians); cos(radians)]; +r = chol(c)'; + +if nargin < 3 + outline_color = 'g'; +end + +h = []; +for i=1:cols(x) + y = r*unitC + repmat(x(:, i), 1, n); + if nargin < 4 + h = [h line(y(1,:), y(2,:), 'Color', outline_color)]; + else + h = [h fill(y(1,:), y(2,:), fill_color, 'EdgeColor', outline_color)]; + end +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/draw_ellipse_axes.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/draw_ellipse_axes.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,19 @@ +function h = draw_ellipse_axes(x, c, linespec) +% DRAW_ELLIPSE_AXES(x, c, linespec) +% Draws the major and minor axes of ellipses. +% Ellipses are centered at x with covariance matrix c. +% x is a matrix of columns. c is a positive definite matrix. +% linespec is optional. + +[v,e] = eig(c); +v = v*sqrt(e); + +h = []; +for j = 1:cols(v) + x1 = repmat(x(1,:),2,1) + repmat([-1;1]*v(1,j),1,cols(x)); + x2 = repmat(x(2,:),2,1) + repmat([-1;1]*v(2,j),1,cols(x)); + h = [h line(x1,x2)]; +end +if nargin > 2 + set_linespec(h,linespec); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/em_converged.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/em_converged.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,32 @@ +function [converged, decrease] = em_converged(loglik, previous_loglik, threshold, check_increased) +% EM_CONVERGED Has EM converged? +% [converged, decrease] = em_converged(loglik, previous_loglik, threshold) +% +% We have converged if the slope of the log-likelihood function falls below 'threshold', +% i.e., |f(t) - f(t-1)| / avg < threshold, +% where avg = (|f(t)| + |f(t-1)|)/2 and f(t) is log lik at iteration t. +% 'threshold' defaults to 1e-4. +% +% This stopping criterion is from Numerical Recipes in C p423 +% +% If we are doing MAP estimation (using priors), the likelihood can decrase, +% even though the mode of the posterior is increasing. 
+ +if nargin < 3, threshold = 1e-4; end +if nargin < 4, check_increased = 1; end + +converged = 0; +decrease = 0; + +if check_increased + if loglik - previous_loglik < -1e-3 % allow for a little imprecision + fprintf(1, '******likelihood decreased from %6.4f to %6.4f!\n', previous_loglik, loglik); + decrease = 1; +converged = 0; +return; + end +end + +delta_loglik = abs(loglik - previous_loglik); +avg_loglik = (abs(loglik) + abs(previous_loglik) + eps)/2; +if (delta_loglik / avg_loglik) < threshold, converged = 1; end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/entropy.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/entropy.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,17 @@ +function H = entropy(v, scale) +% ENTROPY Entropy log base 2 +% H = entropy(v) +% If v is a matrix, we compute the entropy of each column +% +% % H = entropy(v,1) means we scale the result so that it lies in [0,1] + +if nargin < 2, scale = 0; end + +v = v + (v==0); +H = -1 * sum(v .* log2(v), 1); % sum the rows + +if scale + n = size(v, 1); + unif = normalise(ones(n,1)); + H = H / entropy(unif); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/exportfig.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/exportfig.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,991 @@ +function varargout = exportfig(varargin) +%EXPORTFIG Export a figure. +% EXPORTFIG(H, FILENAME) writes the figure H to FILENAME. H is +% a figure handle and FILENAME is a string that specifies the +% name of the output file. +% +% EXPORTFIG(H, FILENAME, OPTIONS) writes the figure H to FILENAME +% with options initially specified by the structure OPTIONS. The +% field names of OPTIONS must be legal parameters listed below +% and the field values must be legal values for the corresponding +% parameter. Default options can be set in releases prior to R12 +% by storing the OPTIONS structure in the root object's appdata +% with the command +% setappdata(0,'exportfigdefaults', OPTIONS) +% and for releases after R12 by setting the preference with the +% command +% setpref('exportfig', 'defaults', OPTIONS) +% +% EXPORTFIG(...,PARAM1,VAL1,PARAM2,VAL2,...) specifies +% parameters that control various characteristics of the output +% file. Any parameter value can be the string 'auto' which means +% the parameter uses the default factory behavior, overriding +% any other default for the parameter. +% +% Format Paramter: +% 'Format' a string +% specifies the output format. Defaults to 'eps'. For a +% list of export formats type 'help print'. +% 'Preview' one of the strings 'none', 'tiff' +% specifies a preview for EPS files. Defaults to 'none'. +% +% Size Parameters: +% 'Width' a positive scalar +% specifies the width in the figure's PaperUnits +% 'Height' a positive scalar +% specifies the height in the figure's PaperUnits +% 'Bounds' one of the strings 'tight', 'loose' +% specifies a tight or loose bounding box. Defaults to 'tight'. +% 'Reference' an axes handle or a string +% specifies that the width and height parameters +% are relative to the given axes. If a string is +% specified then it must evaluate to an axes handle. +% +% Specifying only one dimension sets the other dimension +% so that the exported aspect ratio is the same as the +% figure's or reference axes' current aspect ratio. +% If neither dimension is specified the size defaults to +% the width and height from the figure's or reference +% axes' size. 
Tight bounding boxes are only computed for +% 2-D views and in that case the computed bounds enclose all +% text objects. +% +% Rendering Parameters: +% 'Color' one of the strings 'bw', 'gray', 'cmyk' +% 'bw' specifies that lines and text are exported in +% black and all other objects in grayscale +% 'gray' specifies that all objects are exported in grayscale +% 'rgb' specifies that all objects are exported in color +% using the RGB color space +% 'cmyk' specifies that all objects are exported in color +% using the CMYK color space +% 'Renderer' one of 'painters', 'zbuffer', 'opengl' +% specifies the renderer to use +% 'Resolution' a positive scalar +% specifies the resolution in dots-per-inch. +% 'LockAxes' one of 0 or 1 +% specifies that all axes limits and ticks should be fixed +% while exporting. +% +% The default color setting is 'bw'. +% +% Font Parameters: +% 'FontMode' one of the strings 'scaled', 'fixed' +% 'FontSize' a positive scalar +% in 'scaled' mode multiplies with the font size of each +% text object to obtain the exported font size +% in 'fixed' mode specifies the font size of all text +% objects in points +% 'DefaultFixedFontSize' a positive scalar +% in 'fixed' mode specified the default font size in +% points +% 'FontSizeMin' a positive scalar +% specifies the minimum font size allowed after scaling +% 'FontSizeMax' a positive scalar +% specifies the maximum font size allowed after scaling +% 'FontEncoding' one of the strings 'latin1', 'adobe' +% specifies the character encoding of the font +% 'SeparateText' one of 0 or 1 +% specifies that the text objects are stored in separate +% file as EPS with the base filename having '_t' appended. +% +% If FontMode is 'scaled' but FontSize is not specified then a +% scaling factor is computed from the ratio of the size of the +% exported figure to the size of the actual figure. +% +% The default 'FontMode' setting is 'scaled'. +% +% Line Width Parameters: +% 'LineMode' one of the strings 'scaled', 'fixed' +% 'LineWidth' a positive scalar +% 'DefaultFixedLineWidth' a positive scalar +% 'LineWidthMin' a positive scalar +% specifies the minimum line width allowed after scaling +% 'LineWidthMax' a positive scalar +% specifies the maximum line width allowed after scaling +% The semantics of 'Line' parameters are exactly the +% same as the corresponding 'Font' parameters, except that +% they apply to line widths instead of font sizes. +% +% Style Map Parameter: +% 'LineStyleMap' one of [], 'bw', or a function name or handle +% specifies how to map line colors to styles. An empty +% style map means styles are not changed. The style map +% 'bw' is a built-in mapping that maps lines with the same +% color to the same style and otherwise cycles through the +% available styles. A user-specified map is a function +% that takes as input a cell array of line objects and +% outputs a cell array of line style strings. The default +% map is []. +% +% Examples: +% exportfig(gcf,'fig1.eps','height',3); +% Exports the current figure to the file named 'fig1.eps' with +% a height of 3 inches (assuming the figure's PaperUnits is +% inches) and an aspect ratio the same as the figure's aspect +% ratio on screen. +% +% opts = struct('FontMode','fixed','FontSize',10,'height',3); +% exportfig(gcf, 'fig2.eps', opts, 'height', 5); +% Exports the current figure to 'fig2.eps' with all +% text in 10 point fonts and with height 5 inches. +% +% See also PREVIEWFIG, APPLYTOFIG, RESTOREFIG, PRINT. 
+ +% Copyright 2000 Ben Hinkle +% Email bug reports and comments to bhinkle@mathworks.com + +if (nargin < 2) + error('Too few input arguments'); +end + +% exportfig(H, filename, [options,] ...) +H = varargin{1}; +if ~LocalIsHG(H,'figure') + error('First argument must be a handle to a figure.'); +end +filename = varargin{2}; +if ~ischar(filename) + error('Second argument must be a string.'); +end +paramPairs = {varargin{3:end}}; +if nargin > 2 + if isstruct(paramPairs{1}) + pcell = LocalToCell(paramPairs{1}); + paramPairs = {pcell{:}, paramPairs{2:end}}; + end +end +verstr = version; +majorver = str2num(verstr(1)); +defaults = []; +if majorver > 5 + if ispref('exportfig','defaults') + defaults = getpref('exportfig','defaults'); + end +elseif exist('getappdata') + defaults = getappdata(0,'exportfigdefaults'); +end +if ~isempty(defaults) + dcell = LocalToCell(defaults); + paramPairs = {dcell{:}, paramPairs{:}}; +end + +% Do some validity checking on param-value pairs +if (rem(length(paramPairs),2) ~= 0) + error(['Invalid input syntax. Optional parameters and values' ... + ' must be in pairs.']); +end + +auto.format = 'eps'; +auto.preview = 'none'; +auto.width = -1; +auto.height = -1; +auto.color = 'bw'; +auto.defaultfontsize=10; +auto.fontsize = -1; +auto.fontmode='scaled'; +auto.fontmin = 8; +auto.fontmax = 60; +auto.defaultlinewidth = 1.0; +auto.linewidth = -1; +auto.linemode=[]; +auto.linemin = 0.5; +auto.linemax = 100; +auto.fontencoding = 'latin1'; +auto.renderer = []; +auto.resolution = []; +auto.stylemap = []; +auto.applystyle = 0; +auto.refobj = -1; +auto.bounds = 'tight'; +explicitbounds = 0; +auto.lockaxes = 1; +auto.separatetext = 0; +opts = auto; + +% Process param-value pairs +args = {}; +for k = 1:2:length(paramPairs) + param = lower(paramPairs{k}); + if ~ischar(param) + error('Optional parameter names must be strings'); + end + value = paramPairs{k+1}; + + switch (param) + case 'format' + opts.format = LocalCheckAuto(lower(value),auto.format); + if strcmp(opts.format,'preview') + error(['Format ''preview'' no longer supported. Use PREVIEWFIG' ... 
+ ' instead.']); + end + case 'preview' + opts.preview = LocalCheckAuto(lower(value),auto.preview); + if ~strcmp(opts.preview,{'none','tiff'}) + error('Preview must be ''none'' or ''tiff''.'); + end + case 'width' + opts.width = LocalToNum(value, auto.width); + if ~ischar(value) | ~strcmp(value,'auto') + if ~LocalIsPositiveScalar(opts.width) + error('Width must be a numeric scalar > 0'); + end + end + case 'height' + opts.height = LocalToNum(value, auto.height); + if ~ischar(value) | ~strcmp(value,'auto') + if(~LocalIsPositiveScalar(opts.height)) + error('Height must be a numeric scalar > 0'); + end + end + case 'color' + opts.color = LocalCheckAuto(lower(value),auto.color); + if ~strcmp(opts.color,{'bw','gray','rgb','cmyk'}) + error('Color must be ''bw'', ''gray'',''rgb'' or ''cmyk''.'); + end + case 'fontmode' + opts.fontmode = LocalCheckAuto(lower(value),auto.fontmode); + if ~strcmp(opts.fontmode,{'scaled','fixed'}) + error('FontMode must be ''scaled'' or ''fixed''.'); + end + case 'fontsize' + opts.fontsize = LocalToNum(value,auto.fontsize); + if ~ischar(value) | ~strcmp(value,'auto') + if ~LocalIsPositiveScalar(opts.fontsize) + error('FontSize must be a numeric scalar > 0'); + end + end + case 'defaultfixedfontsize' + opts.defaultfontsize = LocalToNum(value,auto.defaultfontsize); + if ~ischar(value) | ~strcmp(value,'auto') + if ~LocalIsPositiveScalar(opts.defaultfontsize) + error('DefaultFixedFontSize must be a numeric scalar > 0'); + end + end + case 'fontsizemin' + opts.fontmin = LocalToNum(value,auto.fontmin); + if ~ischar(value) | ~strcmp(value,'auto') + if ~LocalIsPositiveScalar(opts.fontmin) + error('FontSizeMin must be a numeric scalar > 0'); + end + end + case 'fontsizemax' + opts.fontmax = LocalToNum(value,auto.fontmax); + if ~ischar(value) | ~strcmp(value,'auto') + if ~LocalIsPositiveScalar(opts.fontmax) + error('FontSizeMax must be a numeric scalar > 0'); + end + end + case 'fontencoding' + opts.fontencoding = LocalCheckAuto(lower(value),auto.fontencoding); + if ~strcmp(opts.fontencoding,{'latin1','adobe'}) + error('FontEncoding must be ''latin1'' or ''adobe''.'); + end + case 'linemode' + opts.linemode = LocalCheckAuto(lower(value),auto.linemode); + if ~strcmp(opts.linemode,{'scaled','fixed'}) + error('LineMode must be ''scaled'' or ''fixed''.'); + end + case 'linewidth' + opts.linewidth = LocalToNum(value,auto.linewidth); + if ~ischar(value) | ~strcmp(value,'auto') + if ~LocalIsPositiveScalar(opts.linewidth) + error('LineWidth must be a numeric scalar > 0'); + end + end + case 'defaultfixedlinewidth' + opts.defaultlinewidth = LocalToNum(value,auto.defaultlinewidth); + if ~ischar(value) | ~strcmp(value,'auto') + if ~LocalIsPositiveScalar(opts.defaultlinewidth) + error(['DefaultFixedLineWidth must be a numeric scalar >' ... 
+ ' 0']); + end + end + case 'linewidthmin' + opts.linemin = LocalToNum(value,auto.linemin); + if ~ischar(value) | ~strcmp(value,'auto') + if ~LocalIsPositiveScalar(opts.linemin) + error('LineWidthMin must be a numeric scalar > 0'); + end + end + case 'linewidthmax' + opts.linemax = LocalToNum(value,auto.linemax); + if ~ischar(value) | ~strcmp(value,'auto') + if ~LocalIsPositiveScalar(opts.linemax) + error('LineWidthMax must be a numeric scalar > 0'); + end + end + case 'linestylemap' + opts.stylemap = LocalCheckAuto(value,auto.stylemap); + case 'renderer' + opts.renderer = LocalCheckAuto(lower(value),auto.renderer); + if ~ischar(value) | ~strcmp(value,'auto') + if ~strcmp(opts.renderer,{'painters','zbuffer','opengl'}) + error(['Renderer must be ''painters'', ''zbuffer'' or' ... + ' ''opengl''.']); + end + end + case 'resolution' + opts.resolution = LocalToNum(value,auto.resolution); + if ~ischar(value) | ~strcmp(value,'auto') + if ~(isnumeric(value) & (prod(size(value)) == 1) & (value >= 0)); + error('Resolution must be a numeric scalar >= 0'); + end + end + case 'applystyle' % means to apply the options and not export + opts.applystyle = 1; + case 'reference' + if ischar(value) + if strcmp(value,'auto') + opts.refobj = auto.refobj; + else + opts.refobj = eval(value); + end + else + opts.refobj = value; + end + if ~LocalIsHG(opts.refobj,'axes') + error('Reference object must evaluate to an axes handle.'); + end + case 'bounds' + opts.bounds = LocalCheckAuto(lower(value),auto.bounds); + explicitbounds = 1; + if ~strcmp(opts.bounds,{'tight','loose'}) + error('Bounds must be ''tight'' or ''loose''.'); + end + case 'lockaxes' + opts.lockaxes = LocalToNum(value,auto.lockaxes); + case 'separatetext' + opts.separatetext = LocalToNum(value,auto.separatetext); + otherwise + error(['Unrecognized option ' param '.']); + end +end + +% make sure figure is up-to-date +drawnow; + +allLines = findall(H, 'type', 'line'); +allText = findall(H, 'type', 'text'); +allAxes = findall(H, 'type', 'axes'); +allImages = findall(H, 'type', 'image'); +allLights = findall(H, 'type', 'light'); +allPatch = findall(H, 'type', 'patch'); +allSurf = findall(H, 'type', 'surface'); +allRect = findall(H, 'type', 'rectangle'); +allFont = [allText; allAxes]; +allColor = [allLines; allText; allAxes; allLights]; +allMarker = [allLines; allPatch; allSurf]; +allEdge = [allPatch; allSurf]; +allCData = [allImages; allPatch; allSurf]; + +old.objs = {}; +old.prop = {}; +old.values = {}; + +% Process format +if strncmp(opts.format,'eps',3) & ~strcmp(opts.preview,'none') + args = {args{:}, ['-' opts.preview]}; +end + +hadError = 0; +oldwarn = warning; +try + + % lock axes limits, ticks and labels if requested + if opts.lockaxes + old = LocalManualAxesMode(old, allAxes, 'TickMode'); + old = LocalManualAxesMode(old, allAxes, 'TickLabelMode'); + old = LocalManualAxesMode(old, allAxes, 'LimMode'); + end + + % Process size parameters + figurePaperUnits = get(H, 'PaperUnits'); + oldFigureUnits = get(H, 'Units'); + oldFigPos = get(H,'Position'); + set(H, 'Units', figurePaperUnits); + figPos = get(H,'Position'); + refsize = figPos(3:4); + if opts.refobj ~= -1 + oldUnits = get(opts.refobj, 'Units'); + set(opts.refobj, 'Units', figurePaperUnits); + r = get(opts.refobj, 'Position'); + refsize = r(3:4); + set(opts.refobj, 'Units', oldUnits); + end + aspectRatio = refsize(1)/refsize(2); + if (opts.width == -1) & (opts.height == -1) + opts.width = refsize(1); + opts.height = refsize(2); + elseif (opts.width == -1) + opts.width = opts.height * 
aspectRatio; + elseif (opts.height == -1) + opts.height = opts.width / aspectRatio; + end + wscale = opts.width/refsize(1); + hscale = opts.height/refsize(2); + sizescale = min(wscale,hscale); + old = LocalPushOldData(old,H,'PaperPositionMode', ... + get(H,'PaperPositionMode')); + set(H, 'PaperPositionMode', 'auto'); + newPos = [figPos(1) figPos(2)+figPos(4)*(1-hscale) ... + wscale*figPos(3) hscale*figPos(4)]; + set(H, 'Position', newPos); + set(H, 'Units', oldFigureUnits); + + % process line-style map + if ~isempty(opts.stylemap) & ~isempty(allLines) + oldlstyle = LocalGetAsCell(allLines,'LineStyle'); + old = LocalPushOldData(old, allLines, {'LineStyle'}, ... + oldlstyle); + newlstyle = oldlstyle; + if ischar(opts.stylemap) & strcmpi(opts.stylemap,'bw') + newlstyle = LocalMapColorToStyle(allLines); + else + try + newlstyle = feval(opts.stylemap,allLines); + catch + warning(['Skipping stylemap. ' lasterr]); + end + end + set(allLines,{'LineStyle'},newlstyle); + end + + % Process rendering parameters + switch (opts.color) + case {'bw', 'gray'} + if ~strcmp(opts.color,'bw') & strncmp(opts.format,'eps',3) + opts.format = [opts.format 'c']; + end + args = {args{:}, ['-d' opts.format]}; + + %compute and set gray colormap + oldcmap = get(H,'Colormap'); + newgrays = 0.30*oldcmap(:,1) + 0.59*oldcmap(:,2) + 0.11*oldcmap(:,3); + newcmap = [newgrays newgrays newgrays]; + old = LocalPushOldData(old, H, 'Colormap', oldcmap); + set(H, 'Colormap', newcmap); + + %compute and set ColorSpec and CData properties + old = LocalUpdateColors(allColor, 'color', old); + old = LocalUpdateColors(allAxes, 'xcolor', old); + old = LocalUpdateColors(allAxes, 'ycolor', old); + old = LocalUpdateColors(allAxes, 'zcolor', old); + old = LocalUpdateColors(allMarker, 'MarkerEdgeColor', old); + old = LocalUpdateColors(allMarker, 'MarkerFaceColor', old); + old = LocalUpdateColors(allEdge, 'EdgeColor', old); + old = LocalUpdateColors(allEdge, 'FaceColor', old); + old = LocalUpdateColors(allCData, 'CData', old); + + case {'rgb','cmyk'} + if strncmp(opts.format,'eps',3) + opts.format = [opts.format 'c']; + args = {args{:}, ['-d' opts.format]}; + if strcmp(opts.color,'cmyk') + args = {args{:}, '-cmyk'}; + end + else + args = {args{:}, ['-d' opts.format]}; + end + otherwise + error('Invalid Color parameter'); + end + if (~isempty(opts.renderer)) + args = {args{:}, ['-' opts.renderer]}; + end + if (~isempty(opts.resolution)) | ~strncmp(opts.format,'eps',3) + if isempty(opts.resolution) + opts.resolution = 0; + end + args = {args{:}, ['-r' int2str(opts.resolution)]}; + end + + % Process font parameters + if ~isempty(opts.fontmode) + oldfonts = LocalGetAsCell(allFont,'FontSize'); + oldfontunits = LocalGetAsCell(allFont,'FontUnits'); + set(allFont,'FontUnits','points'); + switch (opts.fontmode) + case 'fixed' + if (opts.fontsize == -1) + set(allFont,'FontSize',opts.defaultfontsize); + else + set(allFont,'FontSize',opts.fontsize); + end + case 'scaled' + if (opts.fontsize == -1) + scale = sizescale; + else + scale = opts.fontsize; + end + newfonts = LocalScale(oldfonts,scale,opts.fontmin,opts.fontmax); + set(allFont,{'FontSize'},newfonts); + otherwise + error('Invalid FontMode parameter'); + end + old = LocalPushOldData(old, allFont, {'FontSize'}, oldfonts); + old = LocalPushOldData(old, allFont, {'FontUnits'}, oldfontunits); + end + if strcmp(opts.fontencoding,'adobe') & strncmp(opts.format,'eps',3) + args = {args{:}, '-adobecset'}; + end + + % Process line parameters + if ~isempty(opts.linemode) + oldlines = 
LocalGetAsCell(allMarker,'LineWidth'); + old = LocalPushOldData(old, allMarker, {'LineWidth'}, oldlines); + switch (opts.linemode) + case 'fixed' + if (opts.linewidth == -1) + set(allMarker,'LineWidth',opts.defaultlinewidth); + else + set(allMarker,'LineWidth',opts.linewidth); + end + case 'scaled' + if (opts.linewidth == -1) + scale = sizescale; + else + scale = opts.linewidth; + end + newlines = LocalScale(oldlines, scale, opts.linemin, opts.linemax); + set(allMarker,{'LineWidth'},newlines); + end + end + + % adjust figure bounds to surround axes + if strcmp(opts.bounds,'tight') + if (~strncmp(opts.format,'eps',3) & LocalHas3DPlot(allAxes)) | ... + (strncmp(opts.format,'eps',3) & opts.separatetext) + if (explicitbounds == 1) + warning(['Cannot compute ''tight'' bounds. Using ''loose''' ... + ' bounds.']); + end + opts.bounds = 'loose'; + end + end + warning('off'); + if ~isempty(allAxes) + if strncmp(opts.format,'eps',3) + if strcmp(opts.bounds,'loose') + args = {args{:}, '-loose'}; + end + old = LocalPushOldData(old,H,'Position', oldFigPos); + elseif strcmp(opts.bounds,'tight') + oldaunits = LocalGetAsCell(allAxes,'Units'); + oldapos = LocalGetAsCell(allAxes,'Position'); + oldtunits = LocalGetAsCell(allText,'units'); + oldtpos = LocalGetAsCell(allText,'Position'); + set(allAxes,'units','points'); + apos = LocalGetAsCell(allAxes,'Position'); + oldunits = get(H,'Units'); + set(H,'units','points'); + origfr = get(H,'position'); + fr = []; + for k=1:length(allAxes) + if ~strcmpi(get(allAxes(k),'Tag'),'legend') + axesR = apos{k}; + r = LocalAxesTightBoundingBox(axesR, allAxes(k)); + r(1:2) = r(1:2) + axesR(1:2); + fr = LocalUnionRect(fr,r); + end + end + if isempty(fr) + fr = [0 0 origfr(3:4)]; + end + for k=1:length(allAxes) + ax = allAxes(k); + r = apos{k}; + r(1:2) = r(1:2) - fr(1:2); + set(ax,'Position',r); + end + old = LocalPushOldData(old, allAxes, {'Position'}, oldapos); + old = LocalPushOldData(old, allText, {'Position'}, oldtpos); + old = LocalPushOldData(old, allText, {'Units'}, oldtunits); + old = LocalPushOldData(old, allAxes, {'Units'}, oldaunits); + old = LocalPushOldData(old, H, 'Position', oldFigPos); + old = LocalPushOldData(old, H, 'Units', oldFigureUnits); + r = [origfr(1) origfr(2)+origfr(4)-fr(4) fr(3:4)]; + set(H,'Position',r); + else + args = {args{:}, '-loose'}; + old = LocalPushOldData(old,H,'Position', oldFigPos); + end + end + + % Process text in a separate file if needed + if opts.separatetext & ~opts.applystyle + % First hide all text and export + oldtvis = LocalGetAsCell(allText,'visible'); + set(allText,'visible','off'); + oldax = LocalGetAsCell(allAxes,'XTickLabel',1); + olday = LocalGetAsCell(allAxes,'YTickLabel',1); + oldaz = LocalGetAsCell(allAxes,'ZTickLabel',1); + null = cell(length(oldax),1); + [null{:}] = deal([]); + set(allAxes,{'XTickLabel'},null); + set(allAxes,{'YTickLabel'},null); + set(allAxes,{'ZTickLabel'},null); + print(H, filename, args{:}); + set(allText,{'Visible'},oldtvis); + set(allAxes,{'XTickLabel'},oldax); + set(allAxes,{'YTickLabel'},olday); + set(allAxes,{'ZTickLabel'},oldaz); + % Now hide all non-text and export as eps in painters + [path, name, ext] = fileparts(filename); + tfile = fullfile(path,[name '_t.eps']); + tfile2 = fullfile(path,[name '_t2.eps']); + foundRenderer = 0; + for k=1:length(args) + if strncmp('-d',args{k},2) + args{k} = '-deps'; + elseif strncmp('-zbuffer',args{k},8) | ... 
+ strncmp('-opengl', args{k},6) + args{k} = '-painters'; + foundRenderer = 1; + end + end + if ~foundRenderer + args = {args{:}, '-painters'}; + end + allNonText = [allLines; allLights; allPatch; ... + allImages; allSurf; allRect]; + oldvis = LocalGetAsCell(allNonText,'visible'); + oldc = LocalGetAsCell(allAxes,'color'); + oldaxg = LocalGetAsCell(allAxes,'XGrid'); + oldayg = LocalGetAsCell(allAxes,'YGrid'); + oldazg = LocalGetAsCell(allAxes,'ZGrid'); + [null{:}] = deal('off'); + set(allAxes,{'XGrid'},null); + set(allAxes,{'YGrid'},null); + set(allAxes,{'ZGrid'},null); + set(allNonText,'Visible','off'); + set(allAxes,'Color','none'); + print(H, tfile2, args{:}); + set(allNonText,{'Visible'},oldvis); + set(allAxes,{'Color'},oldc); + set(allAxes,{'XGrid'},oldaxg); + set(allAxes,{'YGrid'},oldayg); + set(allAxes,{'ZGrid'},oldazg); + %hack up the postscript file + fid1 = fopen(tfile,'w'); + fid2 = fopen(tfile2,'r'); + line = fgetl(fid2); + while ischar(line) + if strncmp(line,'%%Title',7) + fprintf(fid1,'%s\n',['%%Title: ', tfile]); + elseif (length(line) < 3) + fprintf(fid1,'%s\n',line); + elseif ~strcmp(line(end-2:end),' PR') & ... + ~strcmp(line(end-1:end),' L') + fprintf(fid1,'%s\n',line); + end + line = fgetl(fid2); + end + fclose(fid1); + fclose(fid2); + delete(tfile2); + + elseif ~opts.applystyle + drawnow; + print(H, filename, args{:}); + end + warning(oldwarn); + +catch + warning(oldwarn); + hadError = 1; +end + +% Restore figure settings +if opts.applystyle + varargout{1} = old; +else + for n=1:length(old.objs) + if ~iscell(old.values{n}) & iscell(old.prop{n}) + old.values{n} = {old.values{n}}; + end + set(old.objs{n}, old.prop{n}, old.values{n}); + end +end + +if hadError + error(deblank(lasterr)); +end + +% +% Local Functions +% + +function outData = LocalPushOldData(inData, objs, prop, values) +outData.objs = {objs, inData.objs{:}}; +outData.prop = {prop, inData.prop{:}}; +outData.values = {values, inData.values{:}}; + +function cellArray = LocalGetAsCell(fig,prop,allowemptycell); +cellArray = get(fig,prop); +if nargin < 3 + allowemptycell = 0; +end +if ~iscell(cellArray) & (allowemptycell | ~isempty(cellArray)) + cellArray = {cellArray}; +end + +function newArray = LocalScale(inArray, scale, minv, maxv) +n = length(inArray); +newArray = cell(n,1); +for k=1:n + newArray{k} = min(maxv,max(minv,scale*inArray{k}(1))); +end + +function gray = LocalMapToGray1(color) +gray = color; +if ischar(color) + switch color(1) + case 'y' + color = [1 1 0]; + case 'm' + color = [1 0 1]; + case 'c' + color = [0 1 1]; + case 'r' + color = [1 0 0]; + case 'g' + color = [0 1 0]; + case 'b' + color = [0 0 1]; + case 'w' + color = [1 1 1]; + case 'k' + color = [0 0 0]; + end +end +if ~ischar(color) + gray = 0.30*color(1) + 0.59*color(2) + 0.11*color(3); +end + +function newArray = LocalMapToGray(inArray); +n = length(inArray); +newArray = cell(n,1); +for k=1:n + color = inArray{k}; + if ~isempty(color) + color = LocalMapToGray1(color); + end + if isempty(color) | ischar(color) + newArray{k} = color; + else + newArray{k} = [color color color]; + end +end + +function newArray = LocalMapColorToStyle(inArray); +inArray = LocalGetAsCell(inArray,'Color'); +n = length(inArray); +newArray = cell(n,1); +styles = {'-','--',':','-.'}; +uniques = []; +nstyles = length(styles); +for k=1:n + gray = LocalMapToGray1(inArray{k}); + if isempty(gray) | ischar(gray) | gray < .05 + newArray{k} = '-'; + else + if ~isempty(uniques) & any(gray == uniques) + ind = find(gray==uniques); + else + uniques = [uniques gray]; + ind = 
length(uniques); + end + newArray{k} = styles{mod(ind-1,nstyles)+1}; + end +end + +function newArray = LocalMapCData(inArray); +n = length(inArray); +newArray = cell(n,1); +for k=1:n + color = inArray{k}; + if (ndims(color) == 3) & isa(color,'double') + gray = 0.30*color(:,:,1) + 0.59*color(:,:,2) + 0.11*color(:,:,3); + color(:,:,1) = gray; + color(:,:,2) = gray; + color(:,:,3) = gray; + end + newArray{k} = color; +end + +function outData = LocalUpdateColors(inArray, prop, inData) +value = LocalGetAsCell(inArray,prop); +outData.objs = {inData.objs{:}, inArray}; +outData.prop = {inData.prop{:}, {prop}}; +outData.values = {inData.values{:}, value}; +if (~isempty(value)) + if strcmp(prop,'CData') + value = LocalMapCData(value); + else + value = LocalMapToGray(value); + end + set(inArray,{prop},value); +end + +function bool = LocalIsPositiveScalar(value) +bool = isnumeric(value) & ... + prod(size(value)) == 1 & ... + value > 0; + +function value = LocalToNum(value,auto) +if ischar(value) + if strcmp(value,'auto') + value = auto; + else + value = str2num(value); + end +end + +%convert a struct to {field1,val1,field2,val2,...} +function c = LocalToCell(s) +f = fieldnames(s); +v = struct2cell(s); +opts = cell(2,length(f)); +opts(1,:) = f; +opts(2,:) = v; +c = {opts{:}}; + +function c = LocalIsHG(obj,hgtype) +c = 0; +if (length(obj) == 1) & ishandle(obj) + c = strcmp(get(obj,'type'),hgtype); +end + +function c = LocalHas3DPlot(a) +zticks = LocalGetAsCell(a,'ZTickLabel'); +c = 0; +for k=1:length(zticks) + if ~isempty(zticks{k}) + c = 1; + return; + end +end + +function r = LocalUnionRect(r1,r2) +if isempty(r1) + r = r2; +elseif isempty(r2) + r = r1; +elseif max(r2(3:4)) > 0 + left = min(r1(1),r2(1)); + bot = min(r1(2),r2(2)); + right = max(r1(1)+r1(3),r2(1)+r2(3)); + top = max(r1(2)+r1(4),r2(2)+r2(4)); + r = [left bot right-left top-bot]; +else + r = r1; +end + +function c = LocalLabelsMatchTicks(labs,ticks) +c = 0; +try + t1 = num2str(ticks(1)); + n = length(ticks); + tend = num2str(ticks(n)); + c = strncmp(labs(1),t1,length(labs(1))) & ... + strncmp(labs(n),tend,length(labs(n))); +end + +function r = LocalAxesTightBoundingBox(axesR, a) +r = []; +atext = findall(a,'type','text','visible','on'); +if ~isempty(atext) + set(atext,'units','points'); + res=LocalGetAsCell(atext,'extent'); + for n=1:length(atext) + r = LocalUnionRect(r,res{n}); + end +end +if strcmp(get(a,'visible'),'on') + r = LocalUnionRect(r,[0 0 axesR(3:4)]); + oldunits = get(a,'fontunits'); + set(a,'fontunits','points'); + label = text(0,0,'','parent',a,... + 'units','points',... + 'fontsize',get(a,'fontsize'),... + 'fontname',get(a,'fontname'),... + 'fontweight',get(a,'fontweight'),... + 'fontangle',get(a,'fontangle'),... + 'visible','off'); + fs = get(a,'fontsize'); + + % handle y axis tick labels + ry = [0 -fs/2 0 axesR(4)+fs]; + ylabs = get(a,'yticklabels'); + yticks = get(a,'ytick'); + maxw = 0; + if ~isempty(ylabs) + for n=1:size(ylabs,1) + set(label,'string',ylabs(n,:)); + ext = get(label,'extent'); + maxw = max(maxw,ext(3)); + end + if ~LocalLabelsMatchTicks(ylabs,yticks) & ... 
+ strcmp(get(a,'xaxislocation'),'bottom') + ry(4) = ry(4) + 1.5*ext(4); + end + if strcmp(get(a,'yaxislocation'),'left') + ry(1) = -(maxw+5); + else + ry(1) = axesR(3); + end + ry(3) = maxw+5; + r = LocalUnionRect(r,ry); + end + + % handle x axis tick labels + rx = [0 0 0 fs+5]; + xlabs = get(a,'xticklabels'); + xticks = get(a,'xtick'); + if ~isempty(xlabs) + if strcmp(get(a,'xaxislocation'),'bottom') + rx(2) = -(fs+5); + if ~LocalLabelsMatchTicks(xlabs,xticks); + rx(4) = rx(4) + 2*fs; + rx(2) = rx(2) - 2*fs; + end + else + rx(2) = axesR(4); + % exponent is still below axes + if ~LocalLabelsMatchTicks(xlabs,xticks); + rx(4) = rx(4) + axesR(4) + 2*fs; + rx(2) = -2*fs; + end + end + set(label,'string',xlabs(1,:)); + ext1 = get(label,'extent'); + rx(1) = -ext1(3)/2; + set(label,'string',xlabs(size(xlabs,1),:)); + ext2 = get(label,'extent'); + rx(3) = axesR(3) + (ext2(3) + ext1(3))/2; + r = LocalUnionRect(r,rx); + end + set(a,'fontunits',oldunits); + delete(label); +end + +function c = LocalManualAxesMode(old, allAxes, base) +xs = ['X' base]; +ys = ['Y' base]; +zs = ['Z' base]; +oldXMode = LocalGetAsCell(allAxes,xs); +oldYMode = LocalGetAsCell(allAxes,ys); +oldZMode = LocalGetAsCell(allAxes,zs); +old = LocalPushOldData(old, allAxes, {xs}, oldXMode); +old = LocalPushOldData(old, allAxes, {ys}, oldYMode); +old = LocalPushOldData(old, allAxes, {zs}, oldZMode); +set(allAxes,xs,'manual'); +set(allAxes,ys,'manual'); +set(allAxes,zs,'manual'); +c = old; + +function val = LocalCheckAuto(val, auto) +if ischar(val) & strcmp(val,'auto') + val = auto; +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/extend_domain_table.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/extend_domain_table.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,24 @@ +function B = extend_domain_table(A, smalldom, smallsz, bigdom, bigsz) +% EXTEND_DOMAIN_TABLE Expand an array so it has the desired size. +% B = extend_domain_table(A, smalldom, smallsz, bigdom, bigsz) +% +% A is the array with domain smalldom and sizes smallsz. +% bigdom is the desired domain, with sizes bigsz. +% +% Example: +% smalldom = [1 3], smallsz = [2 4], bigdom = [1 2 3 4], bigsz = [2 1 4 5], +% so B(i,j,k,l) = A(i,k) for i in 1:2, j in 1:1, k in 1:4, l in 1:5 + +if isequal(size(A), [1 1]) % a scalar + B = A; % * myones(bigsz); + return; +end + +map = find_equiv_posns(smalldom, bigdom); +sz = ones(1, length(bigdom)); +sz(map) = smallsz; +B = myreshape(A, sz); % add dimensions for the stuff not in A +sz = bigsz; +sz(map) = 1; % don't replicate along A's dimensions +B = myrepmat(B, sz(:)'); + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/factorial.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/factorial.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,9 @@ +function x = factorial(n) +% FACTORIAL Compute n! 
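+% --- Usage sketch for the figure-export helper defined above ----------------
+% (The function name is not visible in this hunk; 'exportfig' and the call
+%  signature below are assumptions, but the option names and values are the
+%  ones accepted by the parameter switch above.)
+%
+%   exportfig(gcf, 'myfig.eps', ...
+%       'Width', 3.5, 'Height', 2.5, ...  % in the figure's PaperUnits; if only
+%                                         % one is given, the other follows the
+%                                         % reference aspect ratio
+%       'Color', 'cmyk', ...              % 'bw' | 'gray' | 'rgb' | 'cmyk'
+%       'FontMode', 'scaled', 'FontSizeMin', 8, ...
+%       'LineMode', 'fixed', 'LineWidth', 1, ...
+%       'Bounds', 'tight');               % or 'loose'
+% ----------------------------------------------------------------------------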
+% x = factorial(n) + +if n == 0 + x = 1; +else + x = n*factorial(n-1); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/filepartsLast.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/filepartsLast.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,12 @@ +function [last] = filepartsLast(fname) +% filepartsLast Return the last part of a filename (strip off directory and suffix) +% function filepartsLast(fname) +% +% Examples +% filepartsLast('C:/foo/bar') = 'bar' +% filepartsLast('C:/foo/bar.mat') = 'bar' +% filepartsLast('C:/foo/bar.mat.gz') = 'bar.mat' +% filepartsLast('bar.mat') = 'bar' + +[pathstr,name,ext,versn] = fileparts(fname); +last = name; diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/find_equiv_posns.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/find_equiv_posns.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,24 @@ +function p = find_equiv_posns(vsmall, vlarge) +% FIND_EQUIV_POSNS p[i] = the place where vsmall[i] occurs in vlarge. +% p = find_equiv_posns(vsmall, vlarge) +% THE VECTORS ARE ASSUMED TO BE SORTED. +% +% e.g., vsmall=[2,8], vlarge=[2,7,8,4], p=[1,3] +% +% In R/S, this function is called 'match' + +%if ~mysubset(vsmall, vlarge) +% error('small domain must occur in large domain'); +%end + +if isempty(vsmall) | isempty(vlarge) + p = []; + return; +end + +bitvec = sparse(1, max(vlarge)); +%bitvec = zeros(1, max(vlarge)); +bitvec(vsmall) = 1; +p = find(bitvec(vlarge)); + +%p = find(ismember(vlarge, vsmall)); % slower diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/fullfileKPM.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/fullfileKPM.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,6 @@ +function f = fullfileKPM(varargin) +% fullfileKPM Concatenate strings with file separator, then convert it to a/b/c +% function f = fullfileKPM(varargin) + +f = fullfile(varargin{:}); +f = strrep(f, '\', '/'); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/genpathKPM.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/genpathKPM.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,45 @@ +function p = genpathKPM(d) +% genpathKPM Like built-in genpath, but omits directories whose name is 'Old', 'old' or 'CVS' +% function p = genpathKPM(d) + +if nargin==0, + p = genpath(fullfile(matlabroot,'toolbox')); + if length(p) > 1, p(end) = []; end % Remove trailing pathsep + return +end + +% initialise variables +methodsep = '@'; % qualifier for overloaded method directories +p = ''; % path to be returned + +% Generate path based on given root directory +files = dir(d); +if isempty(files) + return +end + +% Add d to the path even if it is empty. +p = [p d pathsep]; + +% set logical vector for subdirectory entries in d +isdir = logical(cat(1,files.isdir)); +% +% Recursively descend through directories which are neither +% private nor "class" directories. +% +dirs = files(isdir); % select only directory entries from the current listing + +for i=1:length(dirs) + dirname = dirs(i).name; + if ~strcmp( dirname,'.') & ... + ~strcmp( dirname,'..') & ... + ~strncmp( dirname,methodsep,1)& ... + ~strcmp( dirname,'private') & ... + ~strcmp( dirname, 'old') & ... % KPM + ~strcmp( dirname, 'Old') & ... % KPM + ~strcmp( dirname, 'CVS') % KPM + p = [p genpathKPM(fullfile(d,dirname))]; % recursive calling of this function. 
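+      % Usage sketch (the toolbox path below is the one used elsewhere in this
+      % changeset and is only illustrative): build a path string that skips
+      % 'old', 'Old' and 'CVS' directories and add it in one call:
+      %   p = genpathKPM('toolboxes/FullBNT-1.0.7'); addpath(p);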
+ end +end + +%------------------------------------------------------------------------------ diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/hash_add.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/hash_add.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,19 @@ +function hash_add(key, val, fname) +% HASH_ADD Append key,value pair to end of hashtable stored in a file +% function hash_add(key, val, filename) +% +% See hash_lookup for an example + +if ~exist(fname, 'file') + % new hashtable + hashtable.key{1} = key; + hashtable.value{1} = val; +else + %hashtable = importdata(fname); + %hashtable = load(fname, '-mat'); + load(fname, '-mat'); + Nentries = length(hashtable.key); + hashtable.key{Nentries+1} = key; + hashtable.value{Nentries+1} = val; +end +save(fname, 'hashtable', '-mat'); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/hash_del.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/hash_del.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,26 @@ +function ndx = hash_del(key, fname) +% HASH_DEL Remove all entries that match key from hashtable stored in a file +% ndx = hash_del(key, fname) +% +% Returns indices of matching entries (if any) +% See hash_lookup for an example + +ndx = []; + +if ~exist(fname, 'file') + % new hashtable - no op +else + %hashtable = importdata(fname); + %hashtable = load(fname, '-mat'); + load(fname, '-mat'); + Nentries = length(hashtable.key); + for i=1:Nentries + if isequal(hashtable.key{i}, key) + ndx = [ndx i]; + end + end + hashtable.key(ndx) = []; + hashtable.value(ndx) = []; + save(fname, 'hashtable', '-mat'); +end + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/hash_lookup.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/hash_lookup.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,34 @@ +function [val, found, Nentries] = hash_lookup(key, fname) +% HASH_LOOKUP Lookup a key in a hash table stored in a file using linear search +% function [val, found, Nentries] = hash_lookup(key, filename) +% +% Example: +% If htbl.mat does not exist, +% [val,found,N] = hash_lookup('foo', 'htbl') +% returns found val = [], found = 0, N = 0 +% hash_add('foo', 42, 'htbl') +% hash_add('bar', [1:10], 'htbl') +% [val,found,N] = hash_lookup('foo', 'htbl') +% now returns val = 42, found = 1, N = 2 +% +% Type 'delete htbl' to delete the file/ reset the hashtable + + +val = []; +found = 0; + +if exist(fname, 'file')==0 + % new hashtable + Nentries = 0; +else + %hashtable = importdata(fname); + load(fname); + Nentries = length(hashtable.key); + for i=1:Nentries + if isequal(hashtable.key{i}, key) + val = hashtable.value{i}; + found = 1; + break; + end + end +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/hsvKPM.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/hsvKPM.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,7 @@ +function colors = hsvKPM(N) +% hsvKPM Like built-in HSV, except it randomizes the order, so that adjacent colors are dis-similar +% function colors = hsvKPM(N) + +colors = hsv(N); +perm = randperm(N); +colors = colors(perm,:); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/hungarian.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/hungarian.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,464 @@ +function [C,T]=hungarian(A) +%HUNGARIAN Solve the Assignment problem using the Hungarian method. 
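+% Example call (sketch; the cost values are arbitrary):
+%   A = [4 2 8; 4 3 7; 3 1 6];   % A(i,j) = cost of assigning row i to column j
+%   [C,T] = hungarian(A);        % C(j) = row assigned to column j, T = total cost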
+% +%[C,T]=hungarian(A) +%A - a square cost matrix. +%C - the optimal assignment. +%T - the cost of the optimal assignment. + +% Adapted from the FORTRAN IV code in Carpaneto and Toth, "Algorithm 548: +% Solution of the assignment problem [H]", ACM Transactions on +% Mathematical Software, 6(1):104-111, 1980. + +% v1.0 96-06-14. Niclas Borlin, niclas@cs.umu.se. +% Department of Computing Science, Umeå University, +% Sweden. +% All standard disclaimers apply. + +% A substantial effort was put into this code. If you use it for a +% publication or otherwise, please include an acknowledgement or at least +% notify me by email. /Niclas + +[m,n]=size(A); + +if (m~=n) + error('HUNGARIAN: Cost matrix must be square!'); +end + +% Save original cost matrix. +orig=A; + +% Reduce matrix. +A=hminired(A); + +% Do an initial assignment. +[A,C,U]=hminiass(A); + +% Repeat while we have unassigned rows. +while (U(n+1)) + % Start with no path, no unchecked zeros, and no unexplored rows. + LR=zeros(1,n); + LC=zeros(1,n); + CH=zeros(1,n); + RH=[zeros(1,n) -1]; + + % No labelled columns. + SLC=[]; + + % Start path in first unassigned row. + r=U(n+1); + % Mark row with end-of-path label. + LR(r)=-1; + % Insert row first in labelled row set. + SLR=r; + + % Repeat until we manage to find an assignable zero. + while (1) + % If there are free zeros in row r + if (A(r,n+1)~=0) + % ...get column of first free zero. + l=-A(r,n+1); + + % If there are more free zeros in row r and row r in not + % yet marked as unexplored.. + if (A(r,l)~=0 & RH(r)==0) + % Insert row r first in unexplored list. + RH(r)=RH(n+1); + RH(n+1)=r; + + % Mark in which column the next unexplored zero in this row + % is. + CH(r)=-A(r,l); + end + else + % If all rows are explored.. + if (RH(n+1)<=0) + % Reduce matrix. + [A,CH,RH]=hmreduce(A,CH,RH,LC,LR,SLC,SLR); + end + + % Re-start with first unexplored row. + r=RH(n+1); + % Get column of next free zero in row r. + l=CH(r); + % Advance "column of next free zero". + CH(r)=-A(r,l); + % If this zero is last in the list.. + if (A(r,l)==0) + % ...remove row r from unexplored list. + RH(n+1)=RH(r); + RH(r)=0; + end + end + + % While the column l is labelled, i.e. in path. + while (LC(l)~=0) + % If row r is explored.. + if (RH(r)==0) + % If all rows are explored.. + if (RH(n+1)<=0) + % Reduce cost matrix. + [A,CH,RH]=hmreduce(A,CH,RH,LC,LR,SLC,SLR); + end + + % Re-start with first unexplored row. + r=RH(n+1); + end + + % Get column of next free zero in row r. + l=CH(r); + + % Advance "column of next free zero". + CH(r)=-A(r,l); + + % If this zero is last in list.. + if(A(r,l)==0) + % ...remove row r from unexplored list. + RH(n+1)=RH(r); + RH(r)=0; + end + end + + % If the column found is unassigned.. + if (C(l)==0) + % Flip all zeros along the path in LR,LC. + [A,C,U]=hmflip(A,C,LC,LR,U,l,r); + % ...and exit to continue with next unassigned row. + break; + else + % ...else add zero to path. + + % Label column l with row r. + LC(l)=r; + + % Add l to the set of labelled columns. + SLC=[SLC l]; + + % Continue with the row assigned to column l. + r=C(l); + + % Label row r with column l. + LR(r)=l; + + % Add r to the set of labelled rows. + SLR=[SLR r]; + end + end +end + +% Calculate the total cost. +T=sum(orig(logical(sparse(C,1:size(orig,2),1)))); + + +function A=hminired(A) +%HMINIRED Initial reduction of cost matrix for the Hungarian method. +% +%B=assredin(A) +%A - the unreduced cost matris. +%B - the reduced cost matrix with linked zeros in each row. + +% v1.0 96-06-13. Niclas Borlin, niclas@cs.umu.se. 
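+% Worked illustration (sketch): for A = [4 2; 3 5] the column minima [3 2]
+% give [1 0; 0 3], whose row minima are already zero, so after reduction every
+% row and column contains at least one zero; the extra column appended below
+% holds, for each row, the linked list of those zero positions.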
+ +[m,n]=size(A); + +% Subtract column-minimum values from each column. +colMin=min(A); +A=A-colMin(ones(n,1),:); + +% Subtract row-minimum values from each row. +rowMin=min(A')'; +A=A-rowMin(:,ones(1,n)); + +% Get positions of all zeros. +[i,j]=find(A==0); + +% Extend A to give room for row zero list header column. +A(1,n+1)=0; +for k=1:n + % Get all column in this row. + cols=j(k==i)'; + % Insert pointers in matrix. + A(k,[n+1 cols])=[-cols 0]; +end + + +function [A,C,U]=hminiass(A) +%HMINIASS Initial assignment of the Hungarian method. +% +%[B,C,U]=hminiass(A) +%A - the reduced cost matrix. +%B - the reduced cost matrix, with assigned zeros removed from lists. +%C - a vector. C(J)=I means row I is assigned to column J, +% i.e. there is an assigned zero in position I,J. +%U - a vector with a linked list of unassigned rows. + +% v1.0 96-06-14. Niclas Borlin, niclas@cs.umu.se. + +[n,np1]=size(A); + +% Initalize return vectors. +C=zeros(1,n); +U=zeros(1,n+1); + +% Initialize last/next zero "pointers". +LZ=zeros(1,n); +NZ=zeros(1,n); + +for i=1:n + % Set j to first unassigned zero in row i. + lj=n+1; + j=-A(i,lj); + + % Repeat until we have no more zeros (j==0) or we find a zero + % in an unassigned column (c(j)==0). + + while (C(j)~=0) + % Advance lj and j in zero list. + lj=j; + j=-A(i,lj); + + % Stop if we hit end of list. + if (j==0) + break; + end + end + + if (j~=0) + % We found a zero in an unassigned column. + + % Assign row i to column j. + C(j)=i; + + % Remove A(i,j) from unassigned zero list. + A(i,lj)=A(i,j); + + % Update next/last unassigned zero pointers. + NZ(i)=-A(i,j); + LZ(i)=lj; + + % Indicate A(i,j) is an assigned zero. + A(i,j)=0; + else + % We found no zero in an unassigned column. + + % Check all zeros in this row. + + lj=n+1; + j=-A(i,lj); + + % Check all zeros in this row for a suitable zero in another row. + while (j~=0) + % Check the in the row assigned to this column. + r=C(j); + + % Pick up last/next pointers. + lm=LZ(r); + m=NZ(r); + + % Check all unchecked zeros in free list of this row. + while (m~=0) + % Stop if we find an unassigned column. + if (C(m)==0) + break; + end + + % Advance one step in list. + lm=m; + m=-A(r,lm); + end + + if (m==0) + % We failed on row r. Continue with next zero on row i. + lj=j; + j=-A(i,lj); + else + % We found a zero in an unassigned column. + + % Replace zero at (r,m) in unassigned list with zero at (r,j) + A(r,lm)=-j; + A(r,j)=A(r,m); + + % Update last/next pointers in row r. + NZ(r)=-A(r,m); + LZ(r)=j; + + % Mark A(r,m) as an assigned zero in the matrix . . . + A(r,m)=0; + + % ...and in the assignment vector. + C(m)=r; + + % Remove A(i,j) from unassigned list. + A(i,lj)=A(i,j); + + % Update last/next pointers in row r. + NZ(i)=-A(i,j); + LZ(i)=lj; + + % Mark A(r,m) as an assigned zero in the matrix . . . + A(i,j)=0; + + % ...and in the assignment vector. + C(j)=i; + + % Stop search. + break; + end + end + end +end + +% Create vector with list of unassigned rows. + +% Mark all rows have assignment. +r=zeros(1,n); +rows=C(C~=0); +r(rows)=rows; +empty=find(r==0); + +% Create vector with linked list of unassigned rows. +U=zeros(1,n+1); +U([n+1 empty])=[empty 0]; + + +function [A,C,U]=hmflip(A,C,LC,LR,U,l,r) +%HMFLIP Flip assignment state of all zeros along a path. +% +%[A,C,U]=hmflip(A,C,LC,LR,U,l,r) +%Input: +%A - the cost matrix. +%C - the assignment vector. +%LC - the column label vector. +%LR - the row label vector. +%U - the +%r,l - position of last zero in path. +%Output: +%A - updated cost matrix. 
+%C - updated assignment vector. +%U - updated unassigned row list vector. + +% v1.0 96-06-14. Niclas Borlin, niclas@cs.umu.se. + +n=size(A,1); + +while (1) + % Move assignment in column l to row r. + C(l)=r; + + % Find zero to be removed from zero list.. + + % Find zero before this. + m=find(A(r,:)==-l); + + % Link past this zero. + A(r,m)=A(r,l); + + A(r,l)=0; + + % If this was the first zero of the path.. + if (LR(r)<0) + ...remove row from unassigned row list and return. + U(n+1)=U(r); + U(r)=0; + return; + else + + % Move back in this row along the path and get column of next zero. + l=LR(r); + + % Insert zero at (r,l) first in zero list. + A(r,l)=A(r,n+1); + A(r,n+1)=-l; + + % Continue back along the column to get row of next zero in path. + r=LC(l); + end +end + + +function [A,CH,RH]=hmreduce(A,CH,RH,LC,LR,SLC,SLR) +%HMREDUCE Reduce parts of cost matrix in the Hungerian method. +% +%[A,CH,RH]=hmreduce(A,CH,RH,LC,LR,SLC,SLR) +%Input: +%A - Cost matrix. +%CH - vector of column of 'next zeros' in each row. +%RH - vector with list of unexplored rows. +%LC - column labels. +%RC - row labels. +%SLC - set of column labels. +%SLR - set of row labels. +% +%Output: +%A - Reduced cost matrix. +%CH - Updated vector of 'next zeros' in each row. +%RH - Updated vector of unexplored rows. + +% v1.0 96-06-14. Niclas Borlin, niclas@cs.umu.se. + +n=size(A,1); + +% Find which rows are covered, i.e. unlabelled. +coveredRows=LR==0; + +% Find which columns are covered, i.e. labelled. +coveredCols=LC~=0; + +r=find(~coveredRows); +c=find(~coveredCols); + +% Get minimum of uncovered elements. +m=min(min(A(r,c))); + +% Subtract minimum from all uncovered elements. +A(r,c)=A(r,c)-m; + +% Check all uncovered columns.. +for j=c + % ...and uncovered rows in path order.. + for i=SLR + % If this is a (new) zero.. + if (A(i,j)==0) + % If the row is not in unexplored list.. + if (RH(i)==0) + % ...insert it first in unexplored list. + RH(i)=RH(n+1); + RH(n+1)=i; + % Mark this zero as "next free" in this row. + CH(i)=j; + end + % Find last unassigned zero on row I. + row=A(i,:); + colsInList=-row(row<0); + if (length(colsInList)==0) + % No zeros in the list. + l=n+1; + else + l=colsInList(row(colsInList)==0); + end + % Append this zero to end of list. + A(i,l)=-j; + end + end +end + +% Add minimum to all doubly covered elements. +r=find(coveredRows); +c=find(coveredCols); + +% Take care of the zeros we will remove. +[i,j]=find(A(r,c)<=0); + +i=r(i); +j=c(j); + +for k=1:length(i) + % Find zero before this in this row. + lj=find(A(i(k),:)==-j(k)); + % Link past it. + A(i(k),lj)=A(i(k),j(k)); + % Mark it as assigned. + A(i(k),j(k))=0; +end + +A(r,c)=A(r,c)+m; diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/image_rgb.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/image_rgb.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,36 @@ +function image_rgb(M) +% Show a matrix of integers as a color image. +% This is like imagesc, except we know what the mapping is from integer to color. 
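+% Usage sketch: image_rgb([1 2 3; 3 2 1]) draws a 2-by-3 grid whose cells are
+% coloured through the small colormap defined below (1 = red, 2 = green,
+% 3 = blue), together with a legend built from dummy line handles.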
+% If entries of M contain integers in {1,2,3}, we map +% this to red/green/blue + +cmap = [1 0 0; % red + 0 1 0; % green + 0 0 1; % blue + 127/255 1 212/255]; % aquamarine +image(M) +set(gcf,'colormap', cmap); + +if 1 + % make dummy handles, one per object type, for the legend + str = {}; + for i=1:size(cmap,1) + dummy_handle(i) = line([0 0.1], [0 0.1]); + set(dummy_handle(i), 'color', cmap(i,:)); + set(dummy_handle(i), 'linewidth', 2); + str{i} = num2str(i); + end + legend(dummy_handle, str, -1); +end + +if 0 +[nrows ncols] = size(M); +img = zeros(nrows, ncols, 3); +for r=1:nrows + for c=1:ncols + q = M(r,c); + img(r,c,q) = 1; + end +end +image(img) +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/imresizeAspect.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/imresizeAspect.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,44 @@ +function img = imresizeAspect(img, maxSize) +% function img = imresizeAspect(img, maxSize) +% If image is larger than max size, reduce size, preserving aspect ratio of input. +% +% If size(input) = [y x] and maxSize = [yy xx], +% then size(output) is given by the following (where a=y/x) +% if y + +void rbinary(int num, int n, double *rbits){ + int i, mask; + num = num - 1; + + mask = 1 << (n-1); /* mask = 00100...0 , where the 1 is in column n (rightmost = col 1) */ + for (i = 0; i < n; i++) { + rbits[n-i-1] = ((num & mask) == 0) ? 1 : 2; + num <<= 1; + } +} + +void ind_subv(int num, const double *sizes, int n, double *rbits){ + int i; + int *cumprod; + + cumprod = malloc(n * sizeof(int)); + num = num - 1; + cumprod[0] = 1; + for (i = 0; i < n-1; i++) + cumprod[i+1] = cumprod[i] * (int)sizes[i]; + for (i = n-1; i >= 0; i--) { + rbits[i] = ((int)floor(num / cumprod[i])) + 1; + num = num % cumprod[i]; + } + free(cumprod); +} + + +void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){ + int i, j, k, nCol, nRow, nnRow, binary, count, temp, temp1, start; + double *pSize, *pNdx, *pr; + double ndx; + int *subv, *cumprod, *templai; + + pSize = mxGetPr(prhs[0]); + pNdx = mxGetPr(prhs[1]); + nCol = mxGetNumberOfElements(prhs[0]); + nnRow = mxGetNumberOfElements(prhs[1]); + + nRow = 1; + for(i=0; i 2.0){ + binary = 0; + break; + } + else if((int)pSize[i] == 1){ + binary = 1; + } + } + + if(nnRow == 1){ + ndx = mxGetScalar(prhs[1]); + plhs[0] = mxCreateDoubleMatrix(1, nCol, mxREAL); + pr = mxGetPr(plhs[0]); + if(binary == 2)rbinary((int)ndx, nCol, pr); + else ind_subv((int)ndx, pSize, nCol, pr); + return; + } + + plhs[0] = mxCreateDoubleMatrix(nnRow, nCol, mxREAL); + pr = mxGetPr(plhs[0]); + + subv = malloc(nRow * nCol * sizeof(int)); + + if (binary == 2) { + for(j=0; j> k; + } + for(j=0; j 2) temp = 1; + for(k=0; k (int)pSize[j]) temp = 1; + for(k=0; k= 0 for any vector v. +% We do this by checking that all the eigenvalues are non-negative. + +E = eig(M); +if length(find(E>=0)) == length(E) + b = 1; +else + b = 0; +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/is_stochastic.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/is_stochastic.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,5 @@ +function p = is_stochastic(T) +% IS_STOCHASTIC Is the argument a stochastic matrix, i.e., the sum over the last dimension is 1. 
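+% Example (sketch): is_stochastic([0.3 0.7; 0.6 0.4]) returns 1, since every
+% row (the sum over the last dimension) adds to 1, while
+% is_stochastic([1 2; 3 4]) returns 0.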
+% p = is_stochastic(T) + +p = approxeq(T, mk_stochastic(T)); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/isemptycell.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/isemptycell.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,16 @@ +function E = isemptycell(C) +% ISEMPTYCELL Apply the isempty function to each element of a cell array +% E = isemptycell(C) +% +% This is equivalent to E = cellfun('isempty', C), +% where cellfun is a function built-in to matlab version 5.3 or newer. + +if 0 % all(version('-release') >= 12) + E = cellfun('isempty', C); +else + E = zeros(size(C)); + for i=1:prod(size(C)) + E(i) = isempty(C{i}); + end + E = logical(E); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/isequalKPM.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/isequalKPM.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,7 @@ +function p = isequalKPM(a,b) + +if isempty(a) & isempty(b) + p = 1; +else + p = isequal(a,b); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/isposdef.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/isposdef.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,9 @@ +function b = isposdef(a) +% ISPOSDEF Test for positive definite matrix. +% ISPOSDEF(A) returns 1 if A is positive definite, 0 otherwise. +% Using chol is much more efficient than computing eigenvectors. + +% From Tom Minka's lightspeed toolbox + +[R,p] = chol(a); +b = (p == 0); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/isscalarBNT.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/isscalarBNT.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,5 @@ +function p = isscalarBNT(v) +% ISSCALAR Returns 1 if all dimensions have size 1. +% p = isscalar(v) + +p = (prod(size(v))==1); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/isvectorBNT.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/isvectorBNT.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,9 @@ +function p = isvectorBNT(v) +% ISVECTOR Returns 1 if all but one dimension have size 1. +% p = isvector(v) +% +% Example: isvector(rand(1,2,1)) = 1, isvector(rand(2,2)) = 0. + +s=size(v); +p = (ndims(v)<=2) & (s(1) == 1 | s(2) == 1); +%p = sum( size(v) > 1) <= 1; % Peter Acklam's solution diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/junk.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/junk.c Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,67 @@ + + m = mxGetM(prhs[0]); + n = mxGetN(prhs[0]); + pr = mxGetPr(prhs[0]); + pi = mxGetPi(prhs[0]); + cmplx = (pi == NULL ? 0 : 1); + + /* Allocate space for sparse matrix. + * NOTE: Assume at most 20% of the data is sparse. Use ceil + * to cause it to round up. + */ + + percent_sparse = 0.2; + nzmax = (int)ceil((double)m*(double)n*percent_sparse); + + plhs[0] = mxCreateSparse(m,n,nzmax,cmplx); + sr = mxGetPr(plhs[0]); + si = mxGetPi(plhs[0]); + irs = mxGetIr(plhs[0]); + jcs = mxGetJc(plhs[0]); + + /* Copy nonzeros. */ + k = 0; + isfull = 0; + for (j = 0; (j < n); j++) { + int i; + jcs[j] = k; + for (i = 0; (i < m); i++) { + if (IsNonZero(pr[i]) || (cmplx && IsNonZero(pi[i]))) { + + /* Check to see if non-zero element will fit in + * allocated output array. If not, increase + * percent_sparse by 10%, recalculate nzmax, and augment + * the sparse array. 
+ */ + if (k >= nzmax) { + int oldnzmax = nzmax; + percent_sparse += 0.1; + nzmax = (int)ceil((double)m*(double)n*percent_sparse); + + /* Make sure nzmax increases atleast by 1. */ + if (oldnzmax == nzmax) + nzmax++; + + mxSetNzmax(plhs[0], nzmax); + mxSetPr(plhs[0], mxRealloc(sr, nzmax*sizeof(double))); + if (si != NULL) + mxSetPi(plhs[0], mxRealloc(si, nzmax*sizeof(double))); + mxSetIr(plhs[0], mxRealloc(irs, nzmax*sizeof(int))); + + sr = mxGetPr(plhs[0]); + si = mxGetPi(plhs[0]); + irs = mxGetIr(plhs[0]); + } + sr[k] = pr[i]; + if (cmplx) { + si[k] = pi[i]; + } + irs[k] = i; + k++; + } + } + pr += m; + pi += m; + } + jcs[n] = k; +} diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/loadcell.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/loadcell.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,153 @@ +function [lc,dflag,dattype]=loadcell(fname,delim,exclusions,options); +%function [lc,dflag,numdata]=loadcell(fname,delim,exclusions); +% +% loadcell loads a cell array with character delimited +% data, which can have variable length lines and content. +% Numeric values are converted from string to double +% unless options is a string containing 'string'. +% +% loadcell is for use with small datasets. It is not optimised +% for large datasets. +% +% fname is the filename to be loaded +% +% delim is/are the relevant delimiter(s). If char(10) is included +% newlines are simply treated as delimiters and a 1-d array is created. +% +% exclusions are the set of characters to be treated as paired +% braces: line ends or delimiters within braces are ignored. +% braces are single characters and any brace can pair with +% any other brace: no type pair checking is done. +% +% options can be omitted or can contain 'string' if no numeric +% conversion is required, 'single' if multiple adjacent seperators +% should not be treated as one, 'free' if all linefeeds should be stripped +% first and 'empty2num' if empty fields are to be treated as numeric +% zeros rather than an empty character set. Combine options using +% concatenation. +% +% lc is a cell array containing the loaded data. +% +% dflag is a set of flags denoting the (i,j) values where data was entered +% dflag(i,j)=1 implies lc(i,j) was loaded from the data, and not just set +% to empty, say, by default. +% +% numdata is an array numdata(i,j)=NaN implies +% lc(i,j) is a string, otherwise it stores the number at i,j. +% This will occur regardless of whether the 'string' option is set. +% +% lc will return -1 if the file is not found or could not be +% opened. +% +% Hint: numdata+(1/dflag-1) provides a concise descriptor for the numeric data +% Inf=not loaded +% NaN=was string or empty set. +% otherwise numeric +% +% EXAMPLE +% +%[a,b]=loadcell('resultsfile',[',' char(9)],'"','single-string'); +% will load file 'resultsfile' into variable a, treating any of tab or +% comma as delimiters. Delimiters or carriage returns lying +% between two double inverted commas will be ignored. Two adjacent delimiters +% will count twice, and all data will be kept as a string. +% +% Note: in space-separated data 'single' would generally be omitted, +% wheras in comma-seperated data it would be included. +% +% Note the exclusion characters will remain in the final data, and any data +% contained within or containing exclusion characters will not be +% converted to numerics. 
+% +% (c) Amos Storkey 2002 +% v b160702 + +% MATLAB is incapable of loading variable length lines or variable type values +% with a whole file command under the standard library sets. This mfile +% fills that gap. +if (nargin<4) + options=' '; +end; +dflag = []; +%Open file +fid=fopen(fname,'rt'); +%Cannot open: return -1 +if (fid<0) + lc=-1; +else + fullfile=fread(fid,'uchar=>char')'; + %Strip LF if free is set + if ~isempty(findstr(options,'free')) + fullfile=strrep(fullfile,char(10),''); + end; + %Find all delimiters + delimpos=[]; + for s=1:length(delim) + delimpos=[delimpos find(fullfile==delim(s))]; + end + %Find all eol + endpos=find(fullfile==char(10)); + endpos=setdiff(endpos,delimpos); + %find all exclusions + xclpos=[]; + for s=1:length(exclusions); + xclpos=[xclpos find(fullfile==exclusions(s))]; + end + sort(xclpos); + xclpos=[xclpos(1:2:end-1);xclpos(2:2:end)]; + %Combine eol and delimiters + jointpos=union(delimpos,endpos); + t=1; + %Remove delim/eol within exclusion pairs + removedelim=[]; + for s=1:length(jointpos) + if any((jointpos(s)>xclpos(1,:)) & (jointpos(s)a2 +% If a1 ~ a2, and a1>a2, then e^(a2-a1) is exp(small negative number), +% which can be computed without underflow. + +% Same as logsumexp, except we assume a is a vector. +% This avoids a call to repmat, which takes 50% of the time! + +a = a(:)'; % make row vector +m = max(a); +b = a - m*ones(1,length(a)); +s = m + log(sum(exp(b))); + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/mahal2conf.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/mahal2conf.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,60 @@ +% MAHAL2CONF - Translates a Mahalanobis distance into a confidence +% interval. Consider a multivariate Gaussian +% distribution of the form +% +% p(x) = 1/sqrt((2 * pi)^d * det(C)) * exp((-1/2) * MD(x, m, inv(C))) +% +% where MD(x, m, P) is the Mahalanobis distance from x +% to m under P: +% +% MD(x, m, P) = (x - m) * P * (x - m)' +% +% A particular Mahalanobis distance k identifies an +% ellipsoid centered at the mean of the distribution. +% The confidence interval associated with this ellipsoid +% is the probability mass enclosed by it. +% +% If X is an d dimensional Gaussian-distributed vector, +% then the Mahalanobis distance of X is distributed +% according to the Chi-squared distribution with d +% degrees of freedom. Thus, the confidence interval is +% determined by integrating the chi squared distribution +% up to the Mahalanobis distance of the measurement. +% +% Usage: +% +% c = mahal2conf(m, d); +% +% Inputs: +% +% m - the Mahalanobis radius of the ellipsoid +% d - the number of dimensions of the Gaussian distribution +% +% Outputs: +% +% c - the confidence interval, i.e., the fraction of +% probability mass enclosed by the ellipsoid with the +% supplied Mahalanobis distance +% +% See also: CONF2MAHAL + +% Copyright (C) 2002 Mark A. Paskin +% +% This program is free software; you can redistribute it and/or modify +% it under the terms of the GNU General Public License as published by +% the Free Software Foundation; either version 2 of the License, or +% (at your option) any later version. +% +% This program is distributed in the hope that it will be useful, but +% WITHOUT ANY WARRANTY; without even the implied warranty of +% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +% General Public License for more details. 
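+% Example (sketch): mahal2conf(9, 2) = chi2cdf(9, 2) = 1 - exp(-4.5), about
+% 0.9889, i.e. roughly 98.9% of the probability mass of a 2-D Gaussian lies
+% within Mahalanobis radius 3 of its mean.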
+% +% You should have received a copy of the GNU General Public License +% along with this program; if not, write to the Free Software +% Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 +% USA. +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +function c = mahal2conf(m, d) + +c = chi2cdf(m, d); \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/marg_table.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/marg_table.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,27 @@ +function smallT = marg_table(bigT, bigdom, bigsz, onto, maximize) +% MARG_TABLE Marginalize a table +% smallT = marg_table(bigT, bigdom, bigsz, onto, maximize) + +if nargin < 5, maximize = 0; end + + +smallT = myreshape(bigT, bigsz); % make sure it is a multi-dim array +sum_over = mysetdiff(bigdom, onto); +ndx = find_equiv_posns(sum_over, bigdom); +if maximize + for i=1:length(ndx) + smallT = max(smallT, [], ndx(i)); + end +else + for i=1:length(ndx) + smallT = sum(smallT, ndx(i)); + end +end + + +ns = zeros(1, max(bigdom)); +%ns(bigdom) = mysize(bigT); % ignores trailing dimensions of size 1 +ns(bigdom) = bigsz; + +smallT = squeeze(smallT); % remove all dimensions of size 1 +smallT = myreshape(smallT, ns(onto)); % put back relevant dims of size 1 diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/marginalize_table.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/marginalize_table.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,50 @@ +function smallT = marginalize_table(bigT, bigdom, bigsz, onto, maximize) +% MARG_TABLE Marginalize a table +% function smallT = marginalize_table(bigT, bigdom, bigsz, onto, maximize) + +% Like marg_table in BNT, except we do not assume the domains are sorted + +if nargin < 5, maximize = 0; end + + +smallT = myreshape(bigT, bigsz); % make sure it is a multi-dim array +sum_over = mysetdiff(bigdom, onto); +ndx = find_equiv_posns(sum_over, bigdom); +if maximize + for i=1:length(ndx) + smallT = max(smallT, [], ndx(i)); + end +else + for i=1:length(ndx) + smallT = sum(smallT, ndx(i)); + end +end + + +ns = zeros(1, max(bigdom)); +%ns(bigdom) = mysize(bigT); % ignores trailing dimensions of size 1 +ns(bigdom) = bigsz; + +% If onto has a different ordering than bigdom, the following +% will produce the wrong results + +%smallT = squeeze(smallT); % remove all dimensions of size 1 +%smallT = myreshape(smallT, ns(onto)); % put back relevant dims of size 1 + +% so permute dimensions to match desired ordering (as specified by onto) + + +% like find_equiv_posns, but keeps ordering +outdom = [onto sum_over]; +for i=1:length(outdom) + j = find(bigdom==outdom(i)); + match(i) = j; +end +outdom = [onto sum_over]; +for i=1:length(outdom) + j = find(bigdom==outdom(i)); + match(i) = j; +end +if match ~= 1 + smallT = permute(smallT, match); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/matprint.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/matprint.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,36 @@ +% MATPRINT - prints a matrix with specified format string +% +% Usage: matprint(a, fmt, fid) +% +% a - Matrix to be printed. +% fmt - C style format string to use for each value. +% fid - Optional file id. +% +% Eg. 
matprint(a,'%3.1f') will print each entry to 1 decimal place + +% Peter Kovesi +% School of Computer Science & Software Engineering +% The University of Western Australia +% pk @ csse uwa edu au +% http://www.csse.uwa.edu.au/~pk +% +% March 2002 + +function matprint(a, fmt, fid) + + if nargin < 3 + fid = 1; + end + + [rows,cols] = size(a); + + % Construct a format string for each row of the matrix consisting of + % 'cols' copies of the number formating specification + fmtstr = []; + for c = 1:cols + fmtstr = [fmtstr, ' ', fmt]; + end + fmtstr = [fmtstr '\n']; % Add a line feed + + fprintf(fid, fmtstr, a'); % Print the transpose of the matrix because + % fprintf runs down the columns of a matrix. \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/max_mult.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/max_mult.c Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,46 @@ +/* C mex version of max_mult.m in BPMRF2 directory */ +/* gcc -Wall -I/mit/matlab_v6.5/distrib/bin/glnx86 -c max_mult.c */ + +#include +#include "mex.h" + +void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) +{ + int rows,cols,common,m,n,p; + double y1, y2; + double *arr1, *arr2, *arr3; + + + if (nrhs!=2 || nlhs>1) + mexErrMsgTxt("max_mult requires two inputs and one output"); + if (mxIsChar(prhs[0]) || mxIsClass(prhs[0], "sparse") || mxIsComplex(prhs[0]) + || mxIsChar(prhs[1]) || mxIsClass(prhs[1], "sparse") || mxIsComplex(prhs[1])) + mexErrMsgTxt("Inputs must be real, full, and nonstring"); + if (mxGetN(prhs[0])!=mxGetM(prhs[1])) + mexErrMsgTxt("The number of columns of A must be the same as the number of rows of x"); + + + arr1=mxGetPr(prhs[0]); + arr2=mxGetPr(prhs[1]); + p=mxGetN(prhs[0]); + m=mxGetM(prhs[0]); + n=mxGetN(prhs[1]); + plhs[0]=mxCreateDoubleMatrix(m, n, mxREAL); + arr3=mxMalloc(m*n*sizeof(double)); + + for (rows=0; rowsy1) + y1=y2; + } + arr3[rows+cols*m]=y1; + } + + mxSetPr(plhs[0], arr3); + +} diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/max_mult.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/max_mult.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,21 @@ +function y=max_mult(A,x) +% MAX_MULT Like matrix multiplication, but sum gets replaced by max +% function y=max_mult(A,x) y(i) = max_j A(i,j) x(j) + +%X=ones(size(A,1),1) * x(:)'; % X(j,i) = x(i) +%y=max(A.*X, [], 2); + +% This is faster +if size(x,2)==1 + X=x*ones(1,size(A,1)); % X(i,j) = x(i) + y=max(A'.*X)'; +else +%this works for arbitrarily sized A and x (but is ugly, and slower than above) + X=repmat(x, [1 1 size(A,1)]); + B=repmat(A, [1 1 size(x,2)]); + C=permute(B,[2 3 1]); + y=permute(max(C.*X),[3 2 1]); +% this is even slower, as is using squeeze instead of permute +% Y=permute(X, [3 1 2]); +% y=permute(max(Y.*B, [], 2), [1 3 2]); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/mexutil.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/mexutil.c Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,43 @@ +#include "mexutil.h" + +/* Functions to create uninitialized arrays. 
*/ + +mxArray *mxCreateNumericArrayE(int ndim, const int *dims, + mxClassID class, mxComplexity ComplexFlag) +{ + mxArray *a; + int i, *dims1 = mxMalloc(ndim*sizeof(int)); + size_t sz = 1; + for(i=0;i 0 + h = hh; +end + +%-------------------------------------------------------------- +%Parse Inputs Function + +function [I,map] = parse_inputs(varargin) + +% initialize variables +map = []; + +iptchecknargin(1,2,nargin,mfilename); +iptcheckinput(varargin{1},{'uint8' 'double' 'uint16' 'logical' 'single' ... + 'int16'},{},mfilename, 'I, BW, or RGB',1); +I = varargin{1}; + +if nargin==2 + if isa(I,'int16') + eid = sprintf('Images:%s:invalidIndexedImage',mfilename); + msg1 = 'An indexed image can be uint8, uint16, double, single, or '; + msg2 = 'logical.'; + error(eid,'%s %s',msg1, msg2); + end + map = varargin{2}; + iptcheckinput(map,{'double'},{},mfilename,'MAP',1); + if ((size(map,1) == 1) && (prod(map) == numel(I))) + % MONTAGE(D,[M N P]) OBSOLETE + eid = sprintf('Images:%s:obsoleteSyntax',mfilename); + msg1 = 'MONTAGE(D,[M N P]) is an obsolete syntax.'; + msg2 = 'Use multidimensional arrays to represent multiframe images.'; + error(eid,'%s\n%s',msg1,msg2); + end +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/montageKPM2.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/montageKPM2.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,69 @@ +function montageKPM2(data) +% data(y,x,b,f) or data(y,x,f) +% can be double - uses imagesc to display, not imshow +% based on imaqmontage + +if ndims(data)==3 + nr = size(data,1); nc = size(data,2); Npatches = size(data,3); Nbands = 1; + data = reshape(data, [nr nc Nbands Npatches]); +else + nr = size(data,1); nc = size(data,2); Nbands = size(data,3); Npatches = size(data,4); +end +nativeVal = data(1, 1); +dataOrig = data; + +%put a black border around them for display purposes +border = 5; +bgColor = min(data(:)); +%bgColor = max(data(:)); +data = bgColor*ones(nr+2*border, nc+2*border, Nbands, Npatches, class(data)); +data(border+1:end-border, border+1:end-border, :, :) = dataOrig; + +[width, height, bands, nFrames] = size(data); + +% Determine the number of axis rows and columns. +axCols = sqrt(nFrames); +if (axCols<1) + % In case we have a slim image. + axCols = 1; +end +axRows = nFrames/axCols; +if (ceil(axCols)-axCols) < (ceil(axRows)-axRows), + axCols = ceil(axCols); + axRows = ceil(nFrames/axCols); +else + axRows = ceil(axRows); + axCols = ceil(nFrames/axRows); +end + +% Size the storage to hold all frames. +storage = repmat(nativeVal, [axRows*width, axCols*height, bands, 1]); + +% Fill the storage up with data pixels. +rows = 1:width; +cols = 1:height; +for i=0:axRows-1, + for j=0:axCols-1, + k = j+i*axCols+1; + if k<=nFrames, + storage(rows+i*width, cols+j*height, :) = data(:,:,:,k); + else + break; + end + end +end + + +% Display the tiled frames nicely and +% pop the window forward. +im = imagesc(storage); + +ax = get(im, 'Parent'); +fig = get(ax, 'Parent'); +set(ax, 'XTick', [], 'YTick', []) +figure(fig) + +% If working with single band images, update the colormap. 
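+% Usage sketch: montageKPM2(rand(32,32,12)) treats the input as twelve 32x32
+% single-band frames, pads each with a 5-pixel border filled with the data
+% minimum, tiles them on a 4-row by 3-column grid, and displays the result
+% with imagesc (so double-valued data is fine, unlike imshow-based montage).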
+if 0 % bands==1, + colormap(gray); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/montageKPM3.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/montageKPM3.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,5 @@ +function montageKPM3(data) +% data{f}(y,x,b) - each frame can have a different size (can can even be empty) + +data2 = cell2matPad(data); +montageKPM2(data2) diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/mult_by_table.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/mult_by_table.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,7 @@ +function bigT = mult_by_table(bigT, bigdom, bigsz, smallT, smalldom, smallsz) +% MULT_BY_TABLE +% bigT = mult_by_table(bigT, bigdom, bigsz, smallT, smalldom, smallsz) +% + +Ts = extend_domain_table(smallT, smalldom, smallsz, bigdom, bigsz); +bigT(:) = bigT(:) .* Ts(:); % must have bigT(:) on LHS to preserve shape diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/myintersect.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/myintersect.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,28 @@ +function C = myintersect(A,B) +% MYINTERSECT Intersection of two sets of positive integers (much faster than built-in intersect) +% C = myintersect(A,B) + +A = A(:)'; B = B(:)'; + +if isempty(A) + ma = 0; +else + ma = max(A); +end + +if isempty(B) + mb = 0; +else + mb = max(B); +end + +if ma==0 | mb==0 + C = []; +else + %bits = sparse(1, max(ma,mb)); + bits = zeros(1, max(ma,mb)); + bits(A) = 1; + C = B(logical(bits(B))); +end + +%sum( bitget( bitand( cliquesb(i), cliquesb(j) ), 1:52 ) ); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/myismember.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/myismember.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,18 @@ +function p = myismember(a,A) +% MYISMEMBER Is 'a' an element of a set of positive integers? (much faster than built-in ismember) +% p = myismember(a,A) + +%if isempty(A) | a < min(A) | a > max(A) % slow + +if length(A)==0 + p = 0; +elseif a < min(A) + p = 0; +elseif a > max(A) + p = 0; +else + bits = zeros(1, max(A)); + bits(A) = 1; + p = bits(a); +end +p = logical(p); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/myones.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/myones.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,11 @@ +function T = myones(sizes) +% MYONES Like the built-in ones, except myones(k) produces a k*1 vector instead of a k*k matrix, +% T = myones(sizes) + +if length(sizes)==0 + T = 1; +elseif length(sizes)==1 + T = ones(sizes, 1); +else + T = ones(sizes(:)'); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/myplot.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/myplot.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,7 @@ +colors = ['r' 'b' 'k' 'g' 'c' 'y' 'm' ... + 'r' 'b' 'k' 'g' 'c' 'y' 'm']; +symbols = ['o' 'x' 's' '>' '<' '^' 'v' ... 
+ '*' 'p' 'h' '+' 'd' 'o' 'x']; +for i=1:length(colors) + styles{i} = sprintf('-%s%s', colors(i), symbols(i)); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/myrand.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/myrand.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,12 @@ +function T = myrand(sizes) +% MYRAND Like the built-in rand, except myrand(k) produces a k*1 vector instead of a k*k matrix, +% T = myrand(sizes) + +if length(sizes)==0 + warning('myrand[]'); + T = rand(1,1); +elseif length(sizes)==1 + T = rand(sizes, 1); +else + T = rand(sizes); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/myrepmat.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/myrepmat.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,9 @@ +function T = myrepmat(T, sizes) +% MYREPMAT Like the built-in repmat, except myrepmat(T,n) == repmat(T,[n 1]) +% T = myrepmat(T, sizes) + +if length(sizes)==1 + T = repmat(T, [sizes 1]); +else + T = repmat(T, sizes(:)'); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/myreshape.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/myreshape.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,11 @@ +function T = myreshape(T, sizes) +% MYRESHAPE Like the built-in reshape, except myreshape(T,n) == reshape(T,[n 1]) +% T = myreshape(T, sizes) + +if length(sizes)==0 + return; +elseif length(sizes)==1 + T = reshape(T, [sizes 1]); +else + T = reshape(T, sizes(:)'); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/mysetdiff.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/mysetdiff.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,19 @@ +function C = mysetdiff(A,B) +% MYSETDIFF Set difference of two sets of positive integers (much faster than built-in setdiff) +% C = mysetdiff(A,B) +% C = A \ B = { things in A that are not in B } +% +% Original by Kevin Murphy, modified by Leon Peshkin + +if isempty(A) + C = []; + return; +elseif isempty(B) + C = A; + return; +else % both non-empty + bits = zeros(1, max(max(A), max(B))); + bits(A) = 1; + bits(B) = 0; + C = A(logical(bits(A))); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/mysize.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/mysize.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,16 @@ +function sz = mysize(M) +% MYSIZE Like the built-in size, except it returns n if M is a vector of length n, and 1 if M is a scalar. +% sz = mysize(M) +% +% The behavior is best explained by examples +% - M = rand(1,1), mysize(M) = 1, size(M) = [1 1] +% - M = rand(2,1), mysize(M) = 2, size(M) = [2 1] +% - M = rand(1,2), mysize(M) = 2, size(M) = [1 2] +% - M = rand(2,2,1), mysize(M) = [2 2], size(M) = [2 2] +% - M = rand(1,2,1), mysize(M) = 2, size(M) = [1 2] + +if isvectorBNT(M) + sz = length(M); +else + sz = size(M); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/mysubset.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/mysubset.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,11 @@ +function p=mysubset(small,large) +% MYSUBSET Is the small set of +ve integers a subset of the large set? +% p = mysubset(small, large) + +% Surprisingly, this is not built-in. 
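+% (Editor's note, not in the original BNT source: a minimal sketch of how
+% these KPMtools set helpers behave, with made-up inputs:
+%   myintersect([1 3 5], [3 5 7])   returns [3 5]
+%   mysetdiff([1 3 5], [3 5 7])     returns 1
+%   mysubset([3 5], [1 3 5 7])      returns 1, i.e. true)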
+ +if isempty(small) + p = 1; % isempty(large); +else + p = length(myintersect(small,large)) == length(small); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/mysymsetdiff.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/mysymsetdiff.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,29 @@ +function C = mysymsetdiff(A,B) +% MYSYMSETDIFF Symmetric set difference of two sets of positive integers (much faster than built-in setdiff) +% C = mysetdiff(A,B) +% C = (A\B) union (B\A) = { things that A and B don't have in common } + +if isempty(A) + ma = 0; +else + ma = max(A); +end + +if isempty(B) + mb = 0; +else + mb = max(B); +end + +if ma==0 + C = B; +elseif mb==0 + C = A; +else % both non-empty + m = max(ma,mb); + bitsA = sparse(1, m); + bitsA(A) = 1; + bitsB = sparse(1, m); + bitsB(B) = 1; + C = find(xor(bitsA, bitsB)); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/myunion.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/myunion.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,29 @@ +function C = myunion(A,B) +% MYUNION Union of two sets of positive integers (much faster than built-in union) +% C = myunion(A,B) + +if isempty(A) + ma = 0; +else + ma = max(A); +end + +if isempty(B) + mb = 0; +else + mb = max(B); +end + +if ma==0 & mb==0 + C = []; +elseif ma==0 & mb>0 + C = B; +elseif ma>0 & mb==0 + C = A; +else + %bits = sparse(1, max(ma,mb)); + bits = zeros(1, max(ma,mb)); + bits(A) = 1; + bits(B) = 1; + C = find(bits); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/nchoose2.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/nchoose2.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,35 @@ +function c = nchoose2(v, f) +%NCHOOSE2 All combinations of N elements taken two at a time. +% +% NCHOOSE2(1:N) or NCHOOSEK(V) where V is a vector of length N, +% produces a matrix with N*(N-1)/2 rows and K columns. Each row of +% the result has K of the elements in the vector V. +% +% NCHOOSE2(N,FLAG) is the same as NCHOOSE2(1:N) but faster. +% +% NCHOOSE2(V) is much faster than NCHOOSEK(V,2). +% +% See also NCHOOSEK, PERMS. + +% Author: Peter J. Acklam +% Time-stamp: 2000-03-03 13:03:59 +% E-mail: jacklam@math.uio.no +% URL: http://www.math.uio.no/~jacklam + + nargs = nargin; + if nargs < 1 + error('Not enough input arguments.'); + elseif nargs == 1 + v = v(:); + n = length(v); + elseif nargs == 2 + n = v; + else + error('Too many input arguments.'); + end + + [ c(:,2), c(:,1) ] = find( tril( ones(n), -1 ) ); + + if nargs == 1 + c = v(c); + end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/ncols.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/ncols.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,4 @@ +function c = cols(x) +% COLS The number of columns. +% COLS is a more readable alternative to size(x,2). +c = size(x,2); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/nonmaxsup.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/nonmaxsup.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,42 @@ +% NONMAXSUP - Non-maximal Suppression +% +% Usage: cim = nonmaxsup(im, radius) +% +% Arguments: +% im - image to be processed. +% radius - radius of region considered in non-maximal +% suppression (optional). Typical values to use might +% be 1-3. Default is 1. 
+% +% Returns: +% cim - image with pixels that are not maximal within a +% square neighborhood zeroed out. + +% Copyright (C) 2002 Mark A. Paskin +% +% This program is free software; you can redistribute it and/or modify +% it under the terms of the GNU General Public License as published by +% the Free Software Foundation; either version 2 of the License, or +% (at your option) any later version. +% +% This program is distributed in the hope that it will be useful, but +% WITHOUT ANY WARRANTY; without even the implied warranty of +% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +% General Public License for more details. +% +% You should have received a copy of the GNU General Public License +% along with this program; if not, write to the Free Software +% Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 +% USA. +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +function cim = nonmaxsup(m, radius) + if (nargin == 1) radius = 1; end + % Extract local maxima by performing a grey scale morphological + % dilation and then finding points in the corner strength image that + % match the dilated image and are also greater than the threshold. + sze = 2 * radius + 1; % Size of mask. + mx = ordfilt2(m, sze^2, ones(sze)); % Grey-scale dilate. + cim = sparse(m .* (m == mx)); + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/normalise.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/normalise.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,34 @@ +function [M, z] = normalise(A, dim) +% NORMALISE Make the entries of a (multidimensional) array sum to 1 +% [M, c] = normalise(A) +% c is the normalizing constant +% +% [M, c] = normalise(A, dim) +% If dim is specified, we normalise the specified dimension only, +% otherwise we normalise the whole array. + +if nargin < 2 + z = sum(A(:)); + % Set any zeros to one before dividing + % This is valid, since c=0 => all i. A(i)=0 => the answer should be 0/1=0 + s = z + (z==0); + M = A / s; +elseif dim==1 % normalize each column + z = sum(A); + s = z + (z==0); + %M = A ./ (d'*ones(1,size(A,1)))'; + M = A ./ repmatC(s, size(A,1), 1); +else + % Keith Battocchi - v. 
slow because of repmat + z=sum(A,dim); + s = z + (z==0); + L=size(A,dim); + d=length(size(A)); + v=ones(d,1); + v(dim)=L; + %c=repmat(s,v); + c=repmat(s,v'); + M=A./c; +end + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/normaliseC.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/normaliseC.c Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,29 @@ +/* C mex version of normalise.m in misc directory */ + +#include "mex.h" + +void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) +{ + double *T, *sum_ptr, sum; + int i, N; + + plhs[0] = mxDuplicateArray(prhs[0]); + T = mxGetPr(plhs[0]); + if(mxIsSparse(plhs[0])) N = mxGetJc(plhs[0])[mxGetN(plhs[0])]; + else N = mxGetNumberOfElements(plhs[0]); + + plhs[1] = mxCreateDoubleMatrix(1, 1, mxREAL); + sum_ptr = mxGetPr(plhs[1]); + + sum = 0; + for (i = 0; i < N; i++) { + sum += *T++; + } + T = mxGetPr(plhs[0]); + if (sum > 0) { + for (i = 0; i < N; i++) { + *T++ /= sum; + } + } + *sum_ptr = sum; +} diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/normaliseC.dll Binary file toolboxes/FullBNT-1.0.7/KPMtools/normaliseC.dll has changed diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/normalize.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/normalize.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,34 @@ +function [M, z] = normalise(A, dim) +% NORMALISE Make the entries of a (multidimensional) array sum to 1 +% [M, c] = normalise(A) +% c is the normalizing constant +% +% [M, c] = normalise(A, dim) +% If dim is specified, we normalise the specified dimension only, +% otherwise we normalise the whole array. + +if nargin < 2 + z = sum(A(:)); + % Set any zeros to one before dividing + % This is valid, since c=0 => all i. A(i)=0 => the answer should be 0/1=0 + s = z + (z==0); + M = A / s; +elseif dim==1 % normalize each column + z = sum(A); + s = z + (z==0); + %M = A ./ (d'*ones(1,size(A,1)))'; + M = A ./ repmatC(s, size(A,1), 1); +else + % Keith Battocchi - v. slow because of repmat + z=sum(A,dim); + s = z + (z==0); + L=size(A,dim); + d=length(size(A)); + v=ones(d,1); + v(dim)=L; + %c=repmat(s,v); + c=repmat(s,v'); + M=A./c; +end + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/nrows.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/nrows.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,4 @@ +function r = rows(x) +% ROWS The number of rows. +% ROWS is a more readable alternative to size(x,1). +r = size(x,1); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/num2strcell.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/num2strcell.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,16 @@ +function c = num2strcell(n, format) +% num2strcell Convert vector of numbers to cell array of strings +% function c = num2strcell(n, format) +% +% If format is omitted, we use +% c{i} = sprintf('%d', n(i)) + +if nargin < 2, format = '%d'; end + +N = length(n); +c = cell(1,N); +for i=1:N + c{i} = sprintf(format, n(i)); +end + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/optimalMatching.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/optimalMatching.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,90 @@ +% MATCH - Solves the weighted bipartite matching (or assignment) +% problem. 
+% +% Usage: a = match(C); +% +% Arguments: +% C - an m x n cost matrix; the sets are taken to be +% 1:m and 1:n; C(i, j) gives the cost of matching +% items i (of the first set) and j (of the second set) +% +% Returns: +% +% a - an m x 1 assignment vector, which gives the +% minimum cost assignment. a(i) is the index of +% the item of 1:n that was matched to item i of +% 1:m. If item i (of 1:m) was not matched to any +% item of 1:n, then a(i) is zero. + +% Copyright (C) 2002 Mark A. Paskin +% +% This program is free software; you can redistribute it and/or modify +% it under the terms of the GNU General Public License as published by +% the Free Software Foundation; either version 2 of the License, or +% (at your option) any later version. +% +% This program is distributed in the hope that it will be useful, but +% WITHOUT ANY WARRANTY; without even the implied warranty of +% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +% General Public License for more details. +% +% You should have received a copy of the GNU General Public License +% along with this program; if not, write to the Free Software +% Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 +% USA. +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +function [a] = optimalMatching(C) + +% Trivial cases: +[p, q] = size(C); +if (p == 0) + a = []; + return; +elseif (q == 0) + a = zeros(p, 1); + return; +end + + +if 0 +% First, reduce the problem by making easy optimal matches. If two +% elements agree that they are the best match, then match them up. +[x, a] = min(C, [], 2); +[y, b] = min(C, [], 1); +u = find(1:p ~= b(a(:))); +a(u) = 0; +v = find(1:q ~= a(b(:))'); +C = C(u, v); +if (isempty(C)) return; end +end + +% Get the (new) size of the two sets, u and v. +[m, n] = size(C); + +%mx = realmax; +mx = 2*max(C(:)); +mn = -2*min(C(:)); +% Pad the affinity matrix to be square +if (m < n) + C = [C; mx * ones(n - m, n)]; +elseif (n < m) + C = [C, mx * ones(m, m - n)]; +end + +% Run the Hungarian method. First replace infinite values by the +% largest (or smallest) finite values. +C(find(isinf(C) & (C > 0))) = mx; +C(find(isinf(C) & (C < 0))) = mn; +%fprintf('running hungarian\n'); +[b, cost] = hungarian(C'); + +% Extract only the real assignments +ap = b(1:m)'; +ap(find(ap > n)) = 0; + +a = ap; +%% Incorporate this sub-assignment into the complete assignment +% k = find(ap); +% a(u(k)) = v(ap(k)); + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/optimalMatchingTest.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/optimalMatchingTest.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,18 @@ +% Consider matching sources to detections + +% s1 d2 +% s2 d3 +% d1 + +a = optimalMatching([52;0.01]) + +% sources(:,i) = [x y] coords +sources = [0.1 0.7; 0.6 0.4]'; +detections = [0.2 0.2; 0.2 0.8; 0.7 0.1]'; +dst = sqdist(sources, detections) + +% a = [2 3] which means s1-d2, s2-d3 +a = optimalMatching(dst) + +% a = [0 1 2] which means d1-0, d2-s1, d3-s2 +a = optimalMatching(dst') diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/partitionData.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/partitionData.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,25 @@ +function varargout = partitionData(Ndata, varargin) +% PARTITIONDATA Partition a vector of indices into random sets +% [a,b,c,...] = partitionData(N, 0.3, 0.2, 0.5, ...) 
+% +% Examples: +% [a,b,c]=partitionData(105,0.3,0.2,0.5); +% a= 1:30, b=32:52, c=52:105 (last bin gets all the left over) + +Npartitions = length(varargin); +perm = randperm(Ndata); +%perm = 1:Ndata; +ndx = 1; +for i=1:Npartitions + pc(i) = varargin{i}; + Nbin(i) = fix(Ndata*pc(i)); + low(i) = ndx; + if i==Npartitions + high(i) = Ndata; + else + high(i) = low(i)+Nbin(i)-1; + end + varargout{i} = perm(low(i):high(i)); + ndx = ndx+Nbin(i); +end + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/partition_matrix_vec.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/partition_matrix_vec.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,21 @@ +function [m1, m2, K11, K12, K21, K22] = partition_matrix_vec(m, K, n1, n2, bs) +% PARTITION_MATRIX_VEC Partition a vector and matrix into blocks. +% [m1, m2, K11, K12, K21, K22] = partition_matrix_vec(m, K, blocks1, blocks2, bs) +% +% bs(i) = block size of i'th node +% +% Example: +% n1 = [6 8], n2 = [5], bs = [- - - - 2 1 - 2], where - = don't care +% m = [0.1 0.2 0.3 0.4 0.5], K = some 5*5 matrix, +% So E[X5] = [0.1 0.2], E[X6] = [0.3], E[X8] = [0.4 0.5] +% m1 = [0.3 0.4 0.5], m2 = [0.1 0.2]; + +dom = myunion(n1, n2); +n1i = block(find_equiv_posns(n1, dom), bs(dom)); +n2i = block(find_equiv_posns(n2, dom), bs(dom)); +m1 = m(n1i); +m2 = m(n2i); +K11 = K(n1i, n1i); +K12 = K(n1i, n2i); +K21 = K(n2i, n1i); +K22 = K(n2i, n2i); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/pca_kpm.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/pca_kpm.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,32 @@ +function [pc_vec]=pca_kpm(features,N, method); +% PCA_KPM Compute top N principal components using eigs or svd. +% [pc_vec]=pca_kpm(features,N) +% +% features(:,i) is the i'th example - each COLUMN is an observation +% pc_vec(:,j) is the j'th basis function onto which you should project the data +% using pc_vec' * features + +[d ncases] = size(features); +fm=features-repmat(mean(features,2), 1, ncases); + + +if method==1 % d*d < d*ncases + fprintf('pca_kpm eigs\n'); + options.disp = 0; + C = cov(fm'); % d x d matrix + [pc_vec, evals] = eigs(C, N, 'LM', options); +else + % [U,D,V] = SVD(fm), U(:,i)=evec of fm fm', V(:,i) = evec of fm' fm + fprintf('pca_kpm svds\n'); + [U,D,V] = svds(fm', N); + pc_vec = V; +end + +if 0 +X = randn(5,3); +X = X-repmat(mean(X),5,1); +C = X'*X; +C2=cov(X) +[U,D,V]=svd(X); +[V2,D2]=eig(X) +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/pca_netlab.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/pca_netlab.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,42 @@ +function [PCcoeff, PCvec] = pca(data, N) +%PCA Principal Components Analysis +% +% Description +% PCCOEFF = PCA(DATA) computes the eigenvalues of the covariance +% matrix of the dataset DATA and returns them as PCCOEFF. These +% coefficients give the variance of DATA along the corresponding +% principal components. +% +% PCCOEFF = PCA(DATA, N) returns the largest N eigenvalues. +% +% [PCCOEFF, PCVEC] = PCA(DATA) returns the principal components as well +% as the coefficients. This is considerably more computationally +% demanding than just computing the eigenvalues. 
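+% (Editor's illustration, not part of the original NETLAB help; the data
+% values are invented:)
+%   data = randn(100, 4);
+%   [pccoeff, pcvec] = pca(data, 2);  % two largest eigenvalues of cov(data)
+%                                     % and the corresponding eigenvectors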
+% +% See also +% EIGDEC, GTMINIT, PPCA +% + +% Copyright (c) Ian T Nabney (1996-2001) + +if nargin == 1 + N = size(data, 2); +end + +if nargout == 1 + evals_only = logical(1); +else + evals_only = logical(0); +end + +if N ~= round(N) | N < 1 | N > size(data, 2) + error('Number of PCs must be integer, >0, < dim'); +end + +% Find the sorted eigenvalues of the data covariance matrix +if evals_only + PCcoeff = eigdec(cov(data), N); +else + [PCcoeff, PCvec] = eigdec(cov(data), N); +end + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/pick.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/pick.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,9 @@ +function [i,j] = pick(ndx) +% PICK Pick an entry at random from a vector +% function [i,j] = pick(ndx) +% +% i = ndx(j) for j ~ U(1:length(ndx)) + +dist = normalize(ones(1,length(ndx))); +j = sample_discrete(dist); +i = ndx(j); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/plotBox.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/plotBox.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,22 @@ +function [h, ht] =plotBox(box, col, str) +% function h=plotBox(box, col, str) +% +% box = [xlow xhigh ylow yhigh] +% col = color (default - red) +% str = string printed at center (default '') + +if nargin < 2, col = 'r'; end +if nargin < 3, str = ''; end + +box = double(box); % fails on single + +h = plot([box(1) box(2) box(2) box(1) box(1)], [ box(3) box(3) box(4) box(4) box(3)]); +set(h, 'color', col); +set(h, 'linewidth', 2); +if ~isempty(str) + xc = mean(box(1:2)); + yc = mean(box(3:4)); + ht = text(xc, yc, str); +else + ht = []; +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/plotColors.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/plotColors.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,10 @@ +function styles = plotColors() + +colors = ['r' 'b' 'k' 'g' 'c' 'y' 'm' ... + 'r' 'b' 'k' 'g' 'c' 'y' 'm']; +symbols = ['o' 'x' '+' '>' '<' '^' 'v' ... 
+ '*' 'p' 'h' 's' 'd' 'o' 'x']; +for i=1:length(colors) + styles{i} = sprintf('-%s%s', colors(i), symbols(i)); +end + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/plotROC.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/plotROC.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,26 @@ +function [falseAlarmRate, detectionRate, area, th] = plotROC(confidence, testClass, col, varargin) +% function [falseAlarmRate, detectionRate, area, th] = plotroc(confidence, testClass, color) + +if nargin < 3, col = []; end + +[scale01] = process_options(varargin, 'scale01', 1); + +[falseAlarmRate detectionRate area th] = computeROC(confidence, testClass); + +if ~isempty(col) + h=plot(falseAlarmRate, detectionRate, [col '-']); + %set(h, 'linewidth', 2); + ex = 0.05*max(falseAlarmRate); + ey = 0.05; + if scale01 + axis([0-ex max(falseAlarmRate)+ex 0-ey 1+ey]) + else + % zoom in on the top left corner + axis([0-ex max(falseAlarmRate)*0.5+ex 0.5-ey 1+ey]) + end + grid on + ylabel('detection rate') + %xlabel('# false alarms') + xlabel('false alarm rate') +end + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/plotROCkpm.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/plotROCkpm.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,69 @@ +function [falseAlarmRate, detectionRate, area, th] = plotROC(confidence, testClass, col, varargin) +% You pass the scores and the classes, and the function returns the false +% alarm rate and the detection rate for different points across the ROC. +% +% [faR, dR] = plotROC(score, class) +% +% faR (false alarm rate) is uniformly sampled from 0 to 1 +% dR (detection rate) is computed using the scores. +% +% class = 0 => target absent +% class = 1 => target present +% +% score is the output of the detector, or any other measure of detection. +% There is no plot unless you add a third parameter that is the color of +% the graph. 
For instance: +% [faR, dR] = plotROC(score, class, 'r') +% +% faR, dR are size 1x1250 + +if nargin < 3, col = []; end +[scale01] = process_options(varargin, 'scale01', 1); + +S = rand('state'); +rand('state',0); +confidence = confidence + rand(size(confidence))*10^(-10); +rand('state',S) + +ndxAbs = find(testClass==0); % absent +ndxPres = find(testClass==1); % present + +[th, j] = sort(confidence(ndxAbs)); +th = th(fix(linspace(1, length(th), 1250))); + +cAbs = confidence(ndxAbs); +cPres = confidence(ndxPres); +for t=1:length(th) + if length(ndxPres) == 0 + detectionRate(t) = 0; + else + detectionRate(t) = sum(cPres>=th(t)) / length(ndxPres); + end + if length(ndxAbs) == 0 + falseAlarmRate(t) = 0; + else + falseAlarmRate(t) = sum(cAbs>=th(t)) / length(ndxAbs); + end + + %detectionRate(t) = sum(confidence(ndxPres)>=th(t)) / length(ndxPres); + %falseAlarmRate(t) = sum(confidence(ndxAbs)>=th(t)) / length(ndxAbs); + %detections(t) = sum(confidence(ndxPres)>=th(t)); + %falseAlarms(t) = sum(confidence(ndxAbs)>=th(t)); +end + +area = sum(abs(falseAlarmRate(2:end) - falseAlarmRate(1:end-1)) .* detectionRate(2:end)); + +if ~isempty(col) + h=plot(falseAlarmRate, detectionRate, [col '-']); + %set(h, 'linewidth', 2); + e = 0.05; + if scale01 + axis([0-e 1+e 0-e 1+e]) + else + % zoom in on the top left corner + axis([0-e 0.5+e 0.5-e 1+e]) + end + grid on + ylabel('detection rate') + xlabel('false alarm rate') +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/plot_axis_thru_origin.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/plot_axis_thru_origin.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,3 @@ +function plot_axis_thru_origin() + +lnx=line(get(gca,'xlim'),[0 0]); lny=line([0 0],get(gca,'ylim')); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/plot_ellipse.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/plot_ellipse.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,19 @@ +% PLOT_ELLIPSE +% h=plot_ellipse(x,y,theta,a,b) +% +% This routine plots an ellipse with centre (x,y), axis lengths a,b +% with major axis at an angle of theta radians from the horizontal. + +% +% Author: P. Fieguth +% Jan. 98 +% +%http://ocho.uwaterloo.ca/~pfieguth/Teaching/372/plot_ellipse.m + +function h=plot_ellipse(x,y,theta,a,b) + +np = 100; +ang = [0:np]*2*pi/np; +R = [cos(theta) -sin(theta); sin(theta) cos(theta)]; +pts = [x;y]*ones(size(ang)) + R*[cos(ang)*a; sin(ang)*b]; +h=plot( pts(1,:), pts(2,:) ); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/plot_matrix.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/plot_matrix.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,48 @@ +function plot_matrix(G, bw) +% PLOT_MATRIX Plot a 2D matrix as a grayscale image, and label the axes +% +% plot_matrix(M) +% +% For 0/1 matrices (eg. adjacency matrices), use +% plot_matrix(M,1) + +if nargin < 2, bw = 0; end + +if 0 + imagesc(G) + %image(G) + %colormap([1 1 1; 0 0 0]); % black squares on white background + %colormap(gray) + grid on + n = length(G); + + % shift the grid lines so they don't intersect the squares + set(gca,'xtick',1.5:1:n); + set(gca,'ytick',1.5:1:n); + + % Turn off the confusing labels, which are fractional + % Ideally we could shift the labels to lie between the axis lines... 
+% set(gca,'xticklabel', []); +% set(gca,'yticklabel', []); +else + % solution provided by Jordan Rosenthal + % You can plot the grid lines manually: + % This uses the trick that a point with a value nan does not get plotted. + imagesc(G); + if bw + colormap([1 1 1; 0 0 0]); + end + n = length(G); + x = 1.5:1:n; + x = [ x; x; repmat(nan,1,n-1) ]; + y = [ 0.5 n+0.5 nan ].'; + y = repmat(y,1,n-1); + x = x(:); + y = y(:); + line(x,y,'linestyle',':','color','k'); + line(y,x,'linestyle',':','color','k'); + set(gca,'xtick',1:n) + set(gca,'ytick',1:n) +end + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/plot_polygon.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/plot_polygon.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,23 @@ +function out=plot_polygon(p, args, close_loop) +% PLOT_POLYGON +% function handle=plot_polygon(p, args, close_loop) +% p(1,i), p(2,i) are the x/y coords of point i. +% If non-empty, args are passed thru to the plot command. +% If close_loop = 1, connect the last point to the first + +% All rights reserved. Documentation updated April 1999. +% Matt Kawski. http://math.la.asu.edu/~kawski +% He calls it pplot + +if nargin < 2, args = []; end +if nargin < 3, close_loop = 0; end + +if close_loop + p = [p p(:,1)]; +end + +if isempty(args) + out=plot(p(1,:),p(2,:)); +else + out=plot(p(1,:),p(2,:),args); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/plotcov2.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/plotcov2.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,86 @@ +% PLOTCOV2 - Plots a covariance ellipse with major and minor axes +% for a bivariate Gaussian distribution. +% +% Usage: +% h = plotcov2(mu, Sigma[, OPTIONS]); +% +% Inputs: +% mu - a 2 x 1 vector giving the mean of the distribution. +% Sigma - a 2 x 2 symmetric positive semi-definite matrix giving +% the covariance of the distribution (or the zero matrix). +% +% Options: +% 'conf' - a scalar between 0 and 1 giving the confidence +% interval (i.e., the fraction of probability mass to +% be enclosed by the ellipse); default is 0.9. +% 'num-pts' - the number of points to be used to plot the +% ellipse; default is 100. +% +% This function also accepts options for PLOT. +% +% Outputs: +% h - a vector of figure handles to the ellipse boundary and +% its major and minor axes +% +% See also: PLOTCOV3 + +% Copyright (C) 2002 Mark A. Paskin +% +% This program is free software; you can redistribute it and/or modify +% it under the terms of the GNU General Public License as published by +% the Free Software Foundation; either version 2 of the License, or +% (at your option) any later version. +% +% This program is distributed in the hope that it will be useful, but +% WITHOUT ANY WARRANTY; without even the implied warranty of +% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +% General Public License for more details. +% +% You should have received a copy of the GNU General Public License +% along with this program; if not, write to the Free Software +% Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 +% USA. +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +function h = plotcov2(mu, Sigma, varargin) + +if size(Sigma) ~= [2 2], error('Sigma must be a 2 by 2 matrix'); end +if length(mu) ~= 2, error('mu must be a 2 by 1 vector'); end + +[p, ... + n, ... + plot_opts] = process_options(varargin, 'conf', 0.9, ... 
+ 'num-pts', 100); +h = []; +holding = ishold; +if (Sigma == zeros(2, 2)) + z = mu; +else + % Compute the Mahalanobis radius of the ellipsoid that encloses + % the desired probability mass. + k = conf2mahal(p, 2); + % The major and minor axes of the covariance ellipse are given by + % the eigenvectors of the covariance matrix. Their lengths (for + % the ellipse with unit Mahalanobis radius) are given by the + % square roots of the corresponding eigenvalues. + if (issparse(Sigma)) + [V, D] = eigs(Sigma); + else + [V, D] = eig(Sigma); + end + % Compute the points on the surface of the ellipse. + t = linspace(0, 2*pi, n); + u = [cos(t); sin(t)]; + w = (k * V * sqrt(D)) * u; + z = repmat(mu, [1 n]) + w; + % Plot the major and minor axes. + L = k * sqrt(diag(D)); + h = plot([mu(1); mu(1) + L(1) * V(1, 1)], ... + [mu(2); mu(2) + L(1) * V(2, 1)], plot_opts{:}); + hold on; + h = [h; plot([mu(1); mu(1) + L(2) * V(1, 2)], ... + [mu(2); mu(2) + L(2) * V(2, 2)], plot_opts{:})]; +end + +h = [h; plot(z(1, :), z(2, :), plot_opts{:})]; +if (~holding) hold off; end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/plotcov2New.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/plotcov2New.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,119 @@ +% PLOTCOV2 - Plots a covariance ellipsoid with axes for a bivariate +% Gaussian distribution. +% +% Usage: +% [h, s] = plotcov2(mu, Sigma[, OPTIONS]); +% +% Inputs: +% mu - a 2 x 1 vector giving the mean of the distribution. +% Sigma - a 2 x 2 symmetric positive semi-definite matrix giving +% the covariance of the distribution (or the zero matrix). +% +% Options: +% 'conf' - a scalar between 0 and 1 giving the confidence +% interval (i.e., the fraction of probability mass to +% be enclosed by the ellipse); default is 0.9. +% 'num-pts' - if the value supplied is n, then (n + 1)^2 points +% to be used to plot the ellipse; default is 20. +% 'label' - if non-empty, a string that will label the +% ellipsoid (default: []) +% 'plot-axes' - a 0/1 flag indicating if the ellipsoid's axes +% should be plotted (default: 1) +% 'plot-opts' - a cell vector of arguments to be handed to PLOT3 +% to contol the appearance of the axes, e.g., +% {'Color', 'g', 'LineWidth', 1}; the default is {} +% 'fill-color' - a color specifier; is this is not [], the +% covariance ellipse is filled with this color +% (default: []) +% +% Outputs: +% h - a vector of handles on the axis lines +% +% See also: PLOTCOV3 + +% Copyright (C) 2002 Mark A. Paskin +% +% This program is free software; you can redistribute it and/or modify +% it under the terms of the GNU General Public License as published by +% the Free Software Foundation; either version 2 of the License, or +% (at your option) any later version. +% +% This program is distributed in the hope that it will be useful, but +% WITHOUT ANY WARRANTY; without even the implied warranty of +% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +% General Public License for more details. +% +% You should have received a copy of the GNU General Public License +% along with this program; if not, write to the Free Software +% Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 +% USA. 
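+% (Editor's sketch, not part of the original file: a typical call using the
+% options documented above might be
+%   plotcov2New([0; 0], [2 1; 1 2], 'conf', 0.95, 'label', 'x');
+% all option names here are taken from this function's help text.)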
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +function [h, s] = plotcov2New(mu, Sigma, varargin) + +h = []; +s = []; + +if size(Sigma) ~= [2 2], error('Sigma must be a 2 by 2 matrix'); end +if length(mu) ~= 2, error('mu must be a 2 by 1 vector'); end + +Sigma = checkpsd(Sigma); + +[p, ... + n, ... + label, ... + plot_axes, ... + plot_opts, ... + fill_color] = process_options(varargin, 'conf', 0.9, ... + 'num-pts', 20, ... + 'label', [], ... + 'plot-axes', 1, ... + 'plot-opts', {}, ... + 'fill-color', []); +holding = ishold; +% Compute the Mahalanobis radius of the ellipsoid that encloses +% the desired probability mass. +k = conf2mahal(p, 2); +% Scale the covariance matrix so the confidence region has unit +% Mahalanobis distance. +Sigma = Sigma * k; +% The axes of the covariance ellipse are given by the eigenvectors of +% the covariance matrix. Their lengths (for the ellipse with unit +% Mahalanobis radius) are given by the square roots of the +% corresponding eigenvalues. +[V, D] = eig(full(Sigma)); +V = real(V); +D = real(D); +D = abs(D); + +% Compute the points on the boundary of the ellipsoid. +t = linspace(0, 2*pi, n); +u = [cos(t(:))'; sin(t(:))']; +w = (V * sqrt(D)) * u; +z = repmat(mu(:), [1 n]) + w; +h = [h; plot(z(1, :), z(2, :), plot_opts{:})]; +if (~isempty(fill_color)) + s = patch(z(1, :), z(2, :), fill_color); +end + +% Plot the axes. +if (plot_axes) + hold on; + L = sqrt(diag(D)); + h = plot([mu(1); mu(1) + L(1) * V(1, 1)], ... + [mu(2); mu(2) + L(1) * V(2, 1)], plot_opts{:}); + h = [h; plot([mu(1); mu(1) + L(2) * V(1, 2)], ... + [mu(2); mu(2) + L(2) * V(2, 2)], plot_opts{:})]; +end + + +if (~isempty(label)) + th = text(mu(1), mu(2), label); + set(th, 'FontSize', 18); + set(th, 'FontName', 'Times'); + set(th, 'FontWeight', 'bold'); + set(th, 'FontAngle', 'italic'); + set(th, 'HorizontalAlignment', 'center'); +end + +if (~holding & plot_axes) hold off; end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/plotcov3.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/plotcov3.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,109 @@ +% PLOTCOV3 - Plots a covariance ellipsoid with axes for a trivariate +% Gaussian distribution. +% +% Usage: +% [h, s] = plotcov3(mu, Sigma[, OPTIONS]); +% +% Inputs: +% mu - a 3 x 1 vector giving the mean of the distribution. +% Sigma - a 3 x 3 symmetric positive semi-definite matrix giving +% the covariance of the distribution (or the zero matrix). +% +% Options: +% 'conf' - a scalar between 0 and 1 giving the confidence +% interval (i.e., the fraction of probability mass to +% be enclosed by the ellipse); default is 0.9. +% 'num-pts' - if the value supplied is n, then (n + 1)^2 points +% to be used to plot the ellipse; default is 20. +% 'plot-opts' - a cell vector of arguments to be handed to PLOT3 +% to contol the appearance of the axes, e.g., +% {'Color', 'g', 'LineWidth', 1}; the default is {} +% 'surf-opts' - a cell vector of arguments to be handed to SURF +% to contol the appearance of the ellipsoid +% surface; a nice possibility that yields +% transparency is: {'EdgeAlpha', 0, 'FaceAlpha', +% 0.1, 'FaceColor', 'g'}; the default is {} +% +% Outputs: +% h - a vector of handles on the axis lines +% s - a handle on the ellipsoid surface object +% +% See also: PLOTCOV2 + +% Copyright (C) 2002 Mark A. 
Paskin +% +% This program is free software; you can redistribute it and/or modify +% it under the terms of the GNU General Public License as published by +% the Free Software Foundation; either version 2 of the License, or +% (at your option) any later version. +% +% This program is distributed in the hope that it will be useful, but +% WITHOUT ANY WARRANTY; without even the implied warranty of +% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +% General Public License for more details. +% +% You should have received a copy of the GNU General Public License +% along with this program; if not, write to the Free Software +% Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 +% USA. +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +function [h, s] = plotcov3(mu, Sigma, varargin) + +if size(Sigma) ~= [3 3], error('Sigma must be a 3 by 3 matrix'); end +if length(mu) ~= 3, error('mu must be a 3 by 1 vector'); end + +[p, ... + n, ... + plot_opts, ... + surf_opts] = process_options(varargin, 'conf', 0.9, ... + 'num-pts', 20, ... + 'plot-opts', {}, ... + 'surf-opts', {}); +h = []; +holding = ishold; +if (Sigma == zeros(3, 3)) + z = mu; +else + % Compute the Mahalanobis radius of the ellipsoid that encloses + % the desired probability mass. + k = conf2mahal(p, 3); + % The axes of the covariance ellipse are given by the eigenvectors of + % the covariance matrix. Their lengths (for the ellipse with unit + % Mahalanobis radius) are given by the square roots of the + % corresponding eigenvalues. + if (issparse(Sigma)) + [V, D] = eigs(Sigma); + else + [V, D] = eig(Sigma); + end + if (any(diag(D) < 0)) + error('Invalid covariance matrix: not positive semi-definite.'); + end + % Compute the points on the surface of the ellipsoid. + t = linspace(0, 2*pi, n); + [X, Y, Z] = sphere(n); + u = [X(:)'; Y(:)'; Z(:)']; + w = (k * V * sqrt(D)) * u; + z = repmat(mu(:), [1 (n + 1)^2]) + w; + + % Plot the axes. + L = k * sqrt(diag(D)); + h = plot3([mu(1); mu(1) + L(1) * V(1, 1)], ... + [mu(2); mu(2) + L(1) * V(2, 1)], ... + [mu(3); mu(3) + L(1) * V(3, 1)], plot_opts{:}); + hold on; + h = [h; plot3([mu(1); mu(1) + L(2) * V(1, 2)], ... + [mu(2); mu(2) + L(2) * V(2, 2)], ... + [mu(3); mu(3) + L(2) * V(3, 2)], plot_opts{:})]; + h = [h; plot3([mu(1); mu(1) + L(3) * V(1, 3)], ... + [mu(2); mu(2) + L(3) * V(2, 3)], ... + [mu(3); mu(3) + L(3) * V(3, 3)], plot_opts{:})]; +end + +s = surf(reshape(z(1, :), [(n + 1) (n + 1)]), ... + reshape(z(2, :), [(n + 1) (n + 1)]), ... + reshape(z(3, :), [(n + 1) (n + 1)]), ... 
+ surf_opts{:}); + +if (~holding) hold off; end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/plotgauss1d.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/plotgauss1d.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,9 @@ +function h = plotgauss1d(mu, sigma2) +% function h = plotgauss1d(mu, sigma^2) +% Example +% plotgauss1d(0,5); hold on; h=plotgauss1d(0,2);set(h,'color','r') + +sigma = sqrt(sigma2); +x = linspace(mu-3*sigma, mu+3*sigma, 100); +p = gaussian_prob(x, mu, sigma2); +h = plot(x, p, '-'); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/plotgauss2d.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/plotgauss2d.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,130 @@ +function h=plotgauss2d(mu, Sigma) +% PLOTGAUSS2D Plot a 2D Gaussian as an ellipse with optional cross hairs +% h=plotgauss2(mu, Sigma) +% + +h = plotcov2(mu, Sigma); +return; + +%%%%%%%%%%%%%%%%%%%%%%%% + +% PLOTCOV2 - Plots a covariance ellipse with major and minor axes +% for a bivariate Gaussian distribution. +% +% Usage: +% h = plotcov2(mu, Sigma[, OPTIONS]); +% +% Inputs: +% mu - a 2 x 1 vector giving the mean of the distribution. +% Sigma - a 2 x 2 symmetric positive semi-definite matrix giving +% the covariance of the distribution (or the zero matrix). +% +% Options: +% 'conf' - a scalar between 0 and 1 giving the confidence +% interval (i.e., the fraction of probability mass to +% be enclosed by the ellipse); default is 0.9. +% 'num-pts' - the number of points to be used to plot the +% ellipse; default is 100. +% +% This function also accepts options for PLOT. +% +% Outputs: +% h - a vector of figure handles to the ellipse boundary and +% its major and minor axes +% +% See also: PLOTCOV3 + +% Copyright (C) 2002 Mark A. Paskin + +function h = plotcov2(mu, Sigma, varargin) + +if size(Sigma) ~= [2 2], error('Sigma must be a 2 by 2 matrix'); end +if length(mu) ~= 2, error('mu must be a 2 by 1 vector'); end + +[p, ... + n, ... + plot_opts] = process_options(varargin, 'conf', 0.9, ... + 'num-pts', 100); +h = []; +holding = ishold; +if (Sigma == zeros(2, 2)) + z = mu; +else + % Compute the Mahalanobis radius of the ellipsoid that encloses + % the desired probability mass. + k = conf2mahal(p, 2); + % The major and minor axes of the covariance ellipse are given by + % the eigenvectors of the covariance matrix. Their lengths (for + % the ellipse with unit Mahalanobis radius) are given by the + % square roots of the corresponding eigenvalues. + if (issparse(Sigma)) + [V, D] = eigs(Sigma); + else + [V, D] = eig(Sigma); + end + % Compute the points on the surface of the ellipse. + t = linspace(0, 2*pi, n); + u = [cos(t); sin(t)]; + w = (k * V * sqrt(D)) * u; + z = repmat(mu, [1 n]) + w; + % Plot the major and minor axes. + L = k * sqrt(diag(D)); + h = plot([mu(1); mu(1) + L(1) * V(1, 1)], ... + [mu(2); mu(2) + L(1) * V(2, 1)], plot_opts{:}); + hold on; + h = [h; plot([mu(1); mu(1) + L(2) * V(1, 2)], ... + [mu(2); mu(2) + L(2) * V(2, 2)], plot_opts{:})]; +end + +h = [h; plot(z(1, :), z(2, :), plot_opts{:})]; +if (~holding) hold off; end + +%%%%%%%%%%%% + +% CONF2MAHAL - Translates a confidence interval to a Mahalanobis +% distance. 
Consider a multivariate Gaussian +% distribution of the form +% +% p(x) = 1/sqrt((2 * pi)^d * det(C)) * exp((-1/2) * MD(x, m, inv(C))) +% +% where MD(x, m, P) is the Mahalanobis distance from x +% to m under P: +% +% MD(x, m, P) = (x - m) * P * (x - m)' +% +% A particular Mahalanobis distance k identifies an +% ellipsoid centered at the mean of the distribution. +% The confidence interval associated with this ellipsoid +% is the probability mass enclosed by it. Similarly, +% a particular confidence interval uniquely determines +% an ellipsoid with a fixed Mahalanobis distance. +% +% If X is an d dimensional Gaussian-distributed vector, +% then the Mahalanobis distance of X is distributed +% according to the Chi-squared distribution with d +% degrees of freedom. Thus, the Mahalanobis distance is +% determined by evaluating the inverse cumulative +% distribution function of the chi squared distribution +% up to the confidence value. +% +% Usage: +% +% m = conf2mahal(c, d); +% +% Inputs: +% +% c - the confidence interval +% d - the number of dimensions of the Gaussian distribution +% +% Outputs: +% +% m - the Mahalanobis radius of the ellipsoid enclosing the +% fraction c of the distribution's probability mass +% +% See also: MAHAL2CONF + +% Copyright (C) 2002 Mark A. Paskin + +function m = conf2mahal(c, d) + +m = chi2inv(c, d); % matlab stats toolbox diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/plotgauss2d_old.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/plotgauss2d_old.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,40 @@ +function h=plotgauss2d_old(mu, Sigma, plot_cross) +% PLOTGAUSS2D Plot a 2D Gaussian as an ellipse with optional cross hairs +% h=plotgauss2(mu, Sigma) +% +% h=plotgauss2(mu, Sigma, 1) also plots the major and minor axes +% +% Example +% clf; S=[2 1; 1 2]; plotgauss2d([0;0], S, 1); axis equal + +if nargin < 3, plot_cross = 0; end +[V,D]=eig(Sigma); +lam1 = D(1,1); +lam2 = D(2,2); +v1 = V(:,1); +v2 = V(:,2); +%assert(approxeq(v1' * v2, 0)) +if v1(1)==0 + theta = 0; % horizontal +else + theta = atan(v1(2)/v1(1)); +end +a = sqrt(lam1); +b = sqrt(lam2); +h=plot_ellipse(mu(1), mu(2), theta, a,b); + +if plot_cross + mu = mu(:); + held = ishold; + hold on + minor1 = mu-a*v1; minor2 = mu+a*v1; + hminor = line([minor1(1) minor2(1)], [minor1(2) minor2(2)]); + + major1 = mu-b*v2; major2 = mu+b*v2; + hmajor = line([major1(1) major2(1)], [major1(2) major2(2)]); + %set(hmajor,'color','r') + if ~held + hold off + end +end + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/polygon_area.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/polygon_area.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,22 @@ +function a = polygon_area(x,y) +% AREA Area of a planar polygon. +% AREA(X,Y) Calculates the area of a 2-dimensional +% polygon formed by vertices with coordinate vectors +% X and Y. The result is direction-sensitive: the +% area is positive if the bounding contour is counter- +% clockwise and negative if it is clockwise. +% +% See also TRAPZ. + +% Copyright (c) 1995 by Kirill K. Pankratov, +% kirill@plume.mit.edu. +% 04/20/94, 05/20/95 + + % Make polygon closed ............. +x = [x(:); x(1)]; +y = [y(:); y(1)]; + + % Calculate contour integral Int -y*dx (same as Int x*dy). 
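+ % (Editor's illustration, not in the original file: for a unit square,
+ %  polygon_area([0 1 1 0], [0 0 1 1]) evaluates to 1.)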
+lx = length(x); +a = -(x(2:lx)-x(1:lx-1))'*(y(1:lx-1)+y(2:lx))/2; +a = abs(a); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/polygon_centroid.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/polygon_centroid.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,79 @@ +function [x0,y0] = centroid(x,y) +% CENTROID Center of mass of a polygon. +% [X0,Y0] = CENTROID(X,Y) Calculates centroid +% (center of mass) of planar polygon with vertices +% coordinates X, Y. +% Z0 = CENTROID(X+i*Y) returns Z0=X0+i*Y0 the same +% as CENTROID(X,Y). + +% Copyright (c) 1995 by Kirill K. Pankratov, +% kirill@plume.mit.edu. +% 06/01/95, 06/07/95 + +% Algorithm: +% X0 = Int{x*ds}/Int{ds}, where ds - area element +% so that Int{ds} is total area of a polygon. +% Using Green's theorem the area integral can be +% reduced to a contour integral: +% Int{x*ds} = -Int{x^2*dy}, Int{ds} = Int{x*dy} along +% the perimeter of a polygon. +% For a polygon as a sequence of line segments +% this can be reduced exactly to a sum: +% Int{x^2*dy} = Sum{ (x_{i}^2+x_{i+1}^2+x_{i}*x_{i+1})* +% (y_{i+1}-y_{i})}/3; +% Int{x*dy} = Sum{(x_{i}+x_{i+1})(y_{i+1}-y_{i})}/2. +% Similarly +% Y0 = Int{y*ds}/Int{ds}, where +% Int{y*ds} = Int{y^2*dx} = +% = Sum{ (y_{i}^2+y_{i+1}^2+y_{i}*y_{i+1})* +% (x_{i+1}-x_{i})}/3. + + % Handle input ...................... +if nargin==0, help centroid, return, end +if nargin==1 + sz = size(x); + if sz(1)==2 % Matrix 2 by n + y = x(2,:); x = x(1,:); + elseif sz(2)==2 % Matrix n by 2 + y = x(:,2); x = x(:,1); + else + y = imag(x); + x = real(x); + end +end + + % Make a polygon closed .............. +x = [x(:); x(1)]; +y = [y(:); y(1)]; + + % Check length ....................... +l = length(x); +if length(y)~=l + error(' Vectors x and y must have the same length') +end + + % X-mean: Int{x^2*dy} ................ +del = y(2:l)-y(1:l-1); +v = x(1:l-1).^2+x(2:l).^2+x(1:l-1).*x(2:l); +x0 = v'*del; + + % Y-mean: Int{y^2*dx} ................ +del = x(2:l)-x(1:l-1); +v = y(1:l-1).^2+y(2:l).^2+y(1:l-1).*y(2:l); +y0 = v'*del; + + % Calculate area: Int{y*dx} .......... +a = (y(1:l-1)+y(2:l))'*del; +tol= 2*eps; +if abs(a) 1 + exportfig(h, filename, varargin{:}, args{:}); +else + exportfig(h, filename, args{:}); +end + +X = imread(filename,'png'); +height = size(X,1); +width = size(X,2); +delete(filename); +f = figure( 'Name', 'Preview', ... + 'Menubar', 'none', ... + 'NumberTitle', 'off', ... + 'Visible', 'off'); +image(X); +axis image; +ax = findobj(f, 'type', 'axes'); +axesPos = [0 0 width height]; +set(ax, 'Units', 'pixels', ... + 'Position', axesPos, ... + 'Visible', 'off'); +figPos = get(f,'Position'); +rootSize = get(0,'ScreenSize'); +figPos(3:4) = axesPos(3:4); +if figPos(1) + figPos(3) > rootSize(3) + figPos(1) = rootSize(3) - figPos(3) - 50; +end +if figPos(2) + figPos(4) > rootSize(4) + figPos(2) = rootSize(4) - figPos(4) - 50; +end +set(f, 'Position',figPos, ... + 'Visible', 'on'); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/process_options.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/process_options.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,132 @@ +% PROCESS_OPTIONS - Processes options passed to a Matlab function. +% This function provides a simple means of +% parsing attribute-value options. Each option is +% named by a unique string and is given a default +% value. +% +% Usage: [var1, var2, ..., varn[, unused]] = ... +% process_options(args, ... 
+% str1, def1, str2, def2, ..., strn, defn) +% +% Arguments: +% args - a cell array of input arguments, such +% as that provided by VARARGIN. Its contents +% should alternate between strings and +% values. +% str1, ..., strn - Strings that are associated with a +% particular variable +% def1, ..., defn - Default values returned if no option +% is supplied +% +% Returns: +% var1, ..., varn - values to be assigned to variables +% unused - an optional cell array of those +% string-value pairs that were unused; +% if this is not supplied, then a +% warning will be issued for each +% option in args that lacked a match. +% +% Examples: +% +% Suppose we wish to define a Matlab function 'func' that has +% required parameters x and y, and optional arguments 'u' and 'v'. +% With the definition +% +% function y = func(x, y, varargin) +% +% [u, v] = process_options(varargin, 'u', 0, 'v', 1); +% +% calling func(0, 1, 'v', 2) will assign 0 to x, 1 to y, 0 to u, and 2 +% to v. The parameter names are insensitive to case; calling +% func(0, 1, 'V', 2) has the same effect. The function call +% +% func(0, 1, 'u', 5, 'z', 2); +% +% will result in u having the value 5 and v having value 1, but +% will issue a warning that the 'z' option has not been used. On +% the other hand, if func is defined as +% +% function y = func(x, y, varargin) +% +% [u, v, unused_args] = process_options(varargin, 'u', 0, 'v', 1); +% +% then the call func(0, 1, 'u', 5, 'z', 2) will yield no warning, +% and unused_args will have the value {'z', 2}. This behaviour is +% useful for functions with options that invoke other functions +% with options; all options can be passed to the outer function and +% its unprocessed arguments can be passed to the inner function. + +% Copyright (C) 2002 Mark A. Paskin +% +% This program is free software; you can redistribute it and/or modify +% it under the terms of the GNU General Public License as published by +% the Free Software Foundation; either version 2 of the License, or +% (at your option) any later version. +% +% This program is distributed in the hope that it will be useful, but +% WITHOUT ANY WARRANTY; without even the implied warranty of +% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +% General Public License for more details. +% +% You should have received a copy of the GNU General Public License +% along with this program; if not, write to the Free Software +% Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 +% USA. 
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +function [varargout] = process_options(args, varargin) + +% Check the number of input arguments +n = length(varargin); +if (mod(n, 2)) + error('Each option must be a string/value pair.'); +end + +% Check the number of supplied output arguments +if (nargout < (n / 2)) + error('Insufficient number of output arguments given'); +elseif (nargout == (n / 2)) + warn = 1; + nout = n / 2; +else + warn = 0; + nout = n / 2 + 1; +end + +% Set outputs to be defaults +varargout = cell(1, nout); +for i=2:2:n + varargout{i/2} = varargin{i}; +end + +% Now process all arguments +nunused = 0; +for i=1:2:length(args) + found = 0; + for j=1:2:n + if strcmpi(args{i}, varargin{j}) + varargout{(j + 1)/2} = args{i + 1}; + found = 1; + break; + end + end + if (~found) + if (warn) + warning(sprintf('Option ''%s'' not used.', args{i})); + args{i} + else + nunused = nunused + 1; + unused{2 * nunused - 1} = args{i}; + unused{2 * nunused} = args{i + 1}; + end + end +end + +% Assign the unused arguments +if (~warn) + if (nunused) + varargout{nout} = unused; + else + varargout{nout} = cell(0); + end +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/rand_psd.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/rand_psd.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,13 @@ +function M = rand_psd(d, d2, k) +% Create a random positive definite matrix of size d by d by k (k defaults to 1) +% M = rand_psd(d, d2, k) default: d2 = d, k = 1 + +if nargin<2, d2 = d; end +if nargin<3, k = 1; end +if d2 ~= d, error('must be square'); end + +M = zeros(d,d,k); +for i=1:k + A = rand(d); + M(:,:,i) = A*A'; +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/rectintC.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/rectintC.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,22 @@ +function [overlap, normoverlap] = rectintC(A,B) +% +% A(i,:) = [x y w h] +% B(j,:) = [x y w h] +% overlap(i,j) = area of intersection +% normoverlap(i,j) = overlap(i,j) / min(area(i), area(j)) +% +% Same as built-in rectint, but faster and uses less memory (since avoids repmat). + + +leftA = A(:,1); +bottomA = A(:,2); +rightA = leftA + A(:,3); +topA = bottomA + A(:,4); + +leftB = B(:,1)'; +bottomB = B(:,2)'; +rightB = leftB + B(:,3)'; +topB = bottomB + B(:,4)'; + +verbose = 0; +[overlap, normoverlap] = rectintLoopC(leftA, rightA, topA, bottomA, leftB, rightB, topB, bottomB, verbose); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/rectintLoopC.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/rectintLoopC.c Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,71 @@ + +#include "mex.h" +#include + +#define MAX(x,y) ((x)>(y) ? (x) : (y)) +#define MIN(x,y) ((x)<(y) ? 
(x) : (y)) + +void mexFunction( + int nlhs, mxArray *plhs[], + int nrhs, const mxArray *prhs[] + ) +{ + int j,k,m,n,nzmax,*irs,*jcs, *irs2, *jcs2; + double *overlap, *overlap2, tmp, areaA, areaB; + double *leftA, *rightA, *topA, *bottomA; + double *leftB, *rightB, *topB, *bottomB; + double *verbose; + + m = MAX(mxGetM(prhs[0]), mxGetN(prhs[0])); + n = MAX(mxGetM(prhs[4]), mxGetN(prhs[4])); + /* printf("A=%d, B=%d\n", m, n); */ + + leftA = mxGetPr(prhs[0]); + rightA = mxGetPr(prhs[1]); + topA = mxGetPr(prhs[2]); + bottomA = mxGetPr(prhs[3]); + + leftB = mxGetPr(prhs[4]); + rightB = mxGetPr(prhs[5]); + topB = mxGetPr(prhs[6]); + bottomB = mxGetPr(prhs[7]); + + verbose = mxGetPr(prhs[8]); + + plhs[0] = mxCreateDoubleMatrix(m,n, mxREAL); + overlap = mxGetPr(plhs[0]); + + plhs[1] = mxCreateDoubleMatrix(m,n, mxREAL); + overlap2 = mxGetPr(plhs[1]); + + k = 0; + for (j = 0; (j < n); j++) { + int i; + for (i = 0; (i < m); i++) { + tmp = (MAX(0, MIN(rightA[i], rightB[j]) - MAX(leftA[i], leftB[j]) )) * + (MAX(0, MIN(topA[i], topB[j]) - MAX(bottomA[i], bottomB[j]) )); + + if (tmp > 0) { + overlap[k] = tmp; + + areaA = (rightA[i]-leftA[i])*(topA[i]-bottomA[i]); + areaB = (rightB[j]-leftB[j])*(topB[j]-bottomB[j]); + overlap2[k] = tmp/MIN(areaA, areaB); + + if (*verbose) { + printf("j=%d,i=%d,overlap=%5.3f, norm=%5.3f\n", j,i, overlap[k], overlap2[k]); + } + } + + k++; + } + } +} + + + + + + + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/rectintLoopC.dll Binary file toolboxes/FullBNT-1.0.7/KPMtools/rectintLoopC.dll has changed diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/rectintSparse.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/rectintSparse.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,59 @@ +function [overlap, normoverlap] = rectintSparse(A,B) +% +% A(i,:) = [x y w h] +% B(j,:) = [x y w h] +% overlap(i,j) = area of intersection +% normoverla(i,j) +% +% Same as built-in rectint, but uses less memory. +% Use rectintSparseC for a faster version. +% + +leftA = A(:,1); +bottomA = A(:,2); +rightA = leftA + A(:,3); +topA = bottomA + A(:,4); + +leftB = B(:,1)'; +bottomB = B(:,2)'; +rightB = leftB + B(:,3)'; +topB = bottomB + B(:,4)'; + +numRectA = size(A,1); +numRectB = size(B,1); + +%out = rectintSparseLoopC(leftA, rightA, topA, bottomA, leftB, rightB, topB, bottomB); + +nnz = ceil(0.2*numRectA*numRectB); % guess of number of non-zeroes +overlap = sparse([], [], [], numRectA, numRectB, nnz); +normoverlap = sparse([], [], [], numRectA, numRectB, nnz); +for j=1:numRectB + for i=1:numRectA + tmp = (max(0, min(rightA(i), rightB(j)) - max(leftA(i), leftB(j)) ) ) .* ... + (max(0, min(topA(i), topB(j)) - max(bottomA(i), bottomB(j)) ) ); + if tmp>0 + overlap(i,j) = tmp; + areaA = (rightA(i)-leftA(i))*(topA(i)-bottomA(i)); + areaB = (rightB(j)-leftB(j))*(topB(j)-bottomB(j)); + normoverlap(i,j) = min(tmp/areaA, tmp/areaB); + end + %fprintf('j=%d, i=%d, overlap=%5.3f, norm=%5.3f\n',... 
+ % j, i, overlap(i,j), normoverlap(i,j)); + end +end + + +if 0 +N = size(bboxDense01,2); % 1000; +rect = bboxToRect(bboxDense01)'; +A = rect(1:2,:); +B = rect(1:N,:); + +tic; out1 = rectint(A, B); toc +tic; out2 = rectintSparse(A, B); toc +tic; out3 = rectintSparseC(A, B); toc +tic; out4 = rectintC(A, B); toc +assert(approxeq(out1, out2)) +assert(approxeq(out1, full(out3))) +assert(approxeq(out1, out4)) +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/rectintSparseC.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/rectintSparseC.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,32 @@ +function [overlap, normoverlap] = rectintSparseC(A,B) +% +% function [area, normarea] = rectintSparseC(A,B) +% A(i,:) = [x y w h] +% B(j,:) = [x y w h] +% out(i,j) = area of intersection +% +% Same as built-in rectint, but uses less memory. +% Also, returns area of overlap normalized by area of patch. +% See rectintSparse + +if isempty(A) | isempty(B) + overlap = []; + normoverlap = []; + return; +end + +leftA = A(:,1); +bottomA = A(:,2); +rightA = leftA + A(:,3); +topA = bottomA + A(:,4); + +leftB = B(:,1)'; +bottomB = B(:,2)'; +rightB = leftB + B(:,3)'; +topB = bottomB + B(:,4)'; + +numRectA = size(A,1); +numRectB = size(B,1); + +verbose = 0; +[overlap, normoverlap] = rectintSparseLoopC(leftA, rightA, topA, bottomA, leftB, rightB, topB, bottomB, verbose); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/rectintSparseLoopC.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/rectintSparseLoopC.c Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,147 @@ +/* This is based on +http://www.mathworks.com/access/helpdesk/help/techdoc/matlab_external/ch04cr12.shtml + +See rectintSparse.m for the matlab version of this code. + +*/ + +#include /* Needed for the ceil() prototype. */ +#include "mex.h" +#include + +/* If you are using a compiler that equates NaN to be zero, you + * must compile this example using the flag -DNAN_EQUALS_ZERO. + * For example: + * + * mex -DNAN_EQUALS_ZERO fulltosparse.c + * + * This will correctly define the IsNonZero macro for your C + * compiler. + */ + +#if defined(NAN_EQUALS_ZERO) +#define IsNonZero(d) ((d) != 0.0 || mxIsNaN(d)) +#else +#define IsNonZero(d) ((d) != 0.0) +#endif + +#define MAX(x,y) ((x)>(y) ? (x) : (y)) +#define MIN(x,y) ((x)<(y) ? (x) : (y)) + +void mexFunction( + int nlhs, mxArray *plhs[], + int nrhs, const mxArray *prhs[] + ) +{ + /* Declare variables. */ + int j,k,m,n,nzmax,*irs,*jcs, *irs2, *jcs2; + double *overlap, *overlap2, tmp, areaA, areaB; + double percent_sparse; + double *leftA, *rightA, *topA, *bottomA; + double *leftB, *rightB, *topB, *bottomB; + double *verbose; + + /* Get the size and pointers to input data. */ + m = MAX(mxGetM(prhs[0]), mxGetN(prhs[0])); + n = MAX(mxGetM(prhs[4]), mxGetN(prhs[4])); + /* printf("A=%d, B=%d\n", m, n); */ + + leftA = mxGetPr(prhs[0]); + rightA = mxGetPr(prhs[1]); + topA = mxGetPr(prhs[2]); + bottomA = mxGetPr(prhs[3]); + + leftB = mxGetPr(prhs[4]); + rightB = mxGetPr(prhs[5]); + topB = mxGetPr(prhs[6]); + bottomB = mxGetPr(prhs[7]); + + verbose = mxGetPr(prhs[8]); + + /* Allocate space for sparse matrix. + * NOTE: Assume at most 20% of the data is sparse. Use ceil + * to cause it to round up. 
+ */ + + percent_sparse = 0.01; + nzmax = (int)ceil((double)m*(double)n*percent_sparse); + + plhs[0] = mxCreateSparse(m,n,nzmax,0); + overlap = mxGetPr(plhs[0]); + irs = mxGetIr(plhs[0]); + jcs = mxGetJc(plhs[0]); + + plhs[1] = mxCreateSparse(m,n,nzmax,0); + overlap2 = mxGetPr(plhs[1]); + irs2 = mxGetIr(plhs[1]); + jcs2 = mxGetJc(plhs[1]); + + + /* Assign nonzeros. */ + k = 0; + for (j = 0; (j < n); j++) { + int i; + jcs[j] = k; + jcs2[j] = k; + for (i = 0; (i < m); i++) { + tmp = (MAX(0, MIN(rightA[i], rightB[j]) - MAX(leftA[i], leftB[j]) )) * + (MAX(0, MIN(topA[i], topB[j]) - MAX(bottomA[i], bottomB[j]) )); + + if (*verbose) { + printf("j=%d,i=%d,tmp=%5.3f\n", j,i,tmp); + } + + if (IsNonZero(tmp)) { + + /* Check to see if non-zero element will fit in + * allocated output array. If not, increase + * percent_sparse by 20%, recalculate nzmax, and augment + * the sparse array. + */ + if (k >= nzmax) { + int oldnzmax = nzmax; + percent_sparse += 0.2; + nzmax = (int)ceil((double)m*(double)n*percent_sparse); + + /* Make sure nzmax increases atleast by 1. */ + if (oldnzmax == nzmax) + nzmax++; + printf("reallocating from %d to %d\n", oldnzmax, nzmax); + + mxSetNzmax(plhs[0], nzmax); + mxSetPr(plhs[0], mxRealloc(overlap, nzmax*sizeof(double))); + mxSetIr(plhs[0], mxRealloc(irs, nzmax*sizeof(int))); + overlap = mxGetPr(plhs[0]); + irs = mxGetIr(plhs[0]); + + mxSetNzmax(plhs[1], nzmax); + mxSetPr(plhs[1], mxRealloc(overlap2, nzmax*sizeof(double))); + mxSetIr(plhs[1], mxRealloc(irs2, nzmax*sizeof(int))); + overlap2 = mxGetPr(plhs[1]); + irs2 = mxGetIr(plhs[1]); + } + + overlap[k] = tmp; + irs[k] = i; + + areaA = (rightA[i]-leftA[i])*(topA[i]-bottomA[i]); + areaB = (rightB[j]-leftB[j])*(topB[j]-bottomB[j]); + overlap2[k] = MIN(tmp/areaA, tmp/areaB); + irs2[k] = i; + + k++; + } /* IsNonZero */ + } /* for i */ + } + jcs[n] = k; + jcs2[n] = k; + +} + + + + + + + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/rectintSparseLoopC.dll Binary file toolboxes/FullBNT-1.0.7/KPMtools/rectintSparseLoopC.dll has changed diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/repmatC.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/repmatC.c Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,149 @@ +/* +mex -c mexutil.c +mex repmat.c mexutil.obj +to check for warnings: +gcc -Wall -I/cygdrive/c/MATLAB6p1/extern/include -c repmat.c +*/ +#include "mexutil.h" +#include + +/* repeat a block of memory rep times */ +void memrep(char *dest, size_t chunk, int rep) +{ +#if 0 + /* slow way */ + int i; + char *p = dest; + for(i=1;i>1); +#endif +} + +void repmat(char *dest, const char *src, int ndim, int *destdimsize, + int *dimsize, const int *dims, int *rep) +{ + int d = ndim-1; + int i, chunk; + /* copy the first repetition into dest */ + if(d == 0) { + chunk = dimsize[0]; + memcpy(dest,src,chunk); + } + else { + /* recursively repeat each slice of src */ + for(i=0;i ndimdest) ndimdest = nrep; + rep = mxCalloc(ndimdest, sizeof(int)); + for(i=0;i ndimdest) ndimdest = nrep; + rep = mxCalloc(ndimdest, sizeof(int)); + for(i=0;i ndim) memrep(dest,destdimsize[ndim-1],extra_rep); + if(mxIsComplex(srcmat)) { + src = (char*)mxGetPi(srcmat); + dest = (char*)mxGetPi(plhs[0]); + repmat(dest,src,ndim,destdimsize,dimsize,dims,rep); + if(ndimdest > ndim) memrep(dest,destdimsize[ndim-1],extra_rep); + } +} diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/repmatC.dll Binary file toolboxes/FullBNT-1.0.7/KPMtools/repmatC.dll has changed diff -r 000000000000 
-r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/rgb2grayKPM.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/rgb2grayKPM.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,11 @@ +function g = rgb2grayKPM(rgb) +% function g = rgb2grayKPM(rgb) +% rgb2grayKPM Like the built-in function, but if r is already gray, does not cause an error + +[nr nc ncolors] = size(rgb); +if ncolors > 1 + g = rgb2gray(rgb); +else + g = rgb; +end + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/rnd_partition.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/rnd_partition.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,13 @@ +function [train, test] = rnd_partition(data, train_percent); +% function [train, test] = rnd_partition(data, train_percent); +% +% data(:,i) is the i'th example +% train_percent of these columns get put into train, the rest into test + +N = size(data, 2); +ndx = randperm(N); +k = ceil(N*train_percent); +train_ndx = ndx(1:k); +test_ndx = ndx(k+1:end); +train = data(:, train_ndx); +test = data(:, test_ndx); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/rotate_xlabel.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/rotate_xlabel.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,37 @@ +function hText = rotate_xlabel(degrees, newlabels) + +% Posted to comp.soft-sys.matlab on 2003-05-01 13:45:36 PST +% by David Borger (borger@ix.netcom.com) + +xtl = get(gca,'XTickLabel'); +set(gca,'XTickLabel',''); +lxtl = length(xtl); +xtl = newlabels; +if 0 % nargin>1 + lnl = length(newlabels); + if lnl~=lxtl + error('Number of new labels must equal number of old'); + end; + xtl = newlabels; +end; + + +hxLabel=get(gca,'XLabel'); +xLP=get(hxLabel,'Position'); +y=xLP(2); +XTick=get(gca,'XTick'); +y=repmat(y,length(XTick),1); +%fs=get(gca,'fontsize'); +fs = 12; +hText=text(XTick,y,xtl,'fontsize',fs); +set(hText,'Rotation',degrees,'HorizontalAlignment','right'); + +% Modifications by KPM + +ylim = get(gca,'ylim'); +height = ylim(2)-ylim(1); +N = length(hText); +for i=1:N + voffset = ylim(2) - 0*height; + set(hText(i), 'position', [i voffset 0]); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/safeStr.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/safeStr.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,5 @@ +function s = safeStr(s) +% Change punctuation characters to they print properly + +s = strrep(s, '\', '/'); +s = strrep(s, '_', '-'); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/sampleUniformInts.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/sampleUniformInts.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,6 @@ +function M = sampleUniformInts(N, r, c) + +% M is an rxc matrix of integers in 1..N + +prob = normalize(ones(N,1)); +M = sample_discrete(prob, r, c); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/sample_discrete.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/sample_discrete.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,40 @@ +function M = sample_discrete(prob, r, c) +% SAMPLE_DISCRETE Like the built in 'rand', except we draw from a non-uniform discrete distrib. +% M = sample_discrete(prob, r, c) +% +% Example: sample_discrete([0.8 0.2], 1, 10) generates a row vector of 10 random integers from {1,2}, +% where the prob. of being 1 is 0.8 and the prob of being 2 is 0.2. 
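+%
+% Editorial sketch of a quick sanity check (not part of the original file):
+% with a large sample the empirical frequencies should approach prob, e.g.
+%
+%   x = sample_discrete([0.8 0.2], 1, 10000);
+%   mean(x == 1)   % roughly 0.8
+%   mean(x == 2)   % roughly 0.2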
+
+n = length(prob);
+
+if nargin == 1
+  r = 1; c = 1;
+elseif nargin == 2
+  c = r;
+end
+
+R = rand(r, c);
+M = ones(r, c);
+cumprob = cumsum(prob(:));
+
+if n < r*c
+  for i = 1:n-1
+    M = M + (R > cumprob(i));
+  end
+else
+  % loop over the smaller index - can be much faster if length(prob) >> r*c
+  cumprob2 = cumprob(1:end-1);
+  for i=1:r
+    for j=1:c
+      M(i,j) = sum(R(i,j) > cumprob2)+1;
+    end
+  end
+end
+
+
+% Slower, even though vectorized
+%cumprob = reshape(cumsum([0 prob(1:end-1)]), [1 1 n]);
+%M = sum(R(:,:,ones(n,1)) > cumprob(ones(r,1),ones(c,1),:), 3);
+
+% convert using a binning algorithm
+%M=bindex(R,cumprob);
diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/set_xtick_label.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/set_xtick_label.m Fri Aug 19 13:07:06 2016 +0200
@@ -0,0 +1,51 @@
+function set_xtick_label(tick_labels, angle, axis_label)
+% SET_XTICK_LABEL Print the xtick labels at an angle instead of horizontally
+% set_xtick_label(tick_labels, angle, axis_label)
+%
+% angle default = 90
+% axis_label default = ''
+%
+% This is derived from Solution Number: 5375 on mathworks.com
+% See set_xtick_label_demo for an example
+
+if nargin < 2, angle = 90; end
+if nargin < 3, axis_label = []; end
+
+% Reduce the size of the axis so that all the labels fit in the figure.
+pos = get(gca,'Position');
+%set(gca,'Position',[pos(1), .2, pos(3) .65])
+%set(gca,'Position',[pos(1), 0, pos(3) .45])
+%set(gca,'Position',[pos(1), 0.1, pos(3) 0.5])
+
+ax = axis; % Current axis limits
+axis(axis); % Fix the axis limits
+Yl = ax(3:4); % Y-axis limits
+
+%set(gca, 'xtick', 1:length(tick_labels));
+set(gca, 'xtick', 0.7:1:length(tick_labels));
+Xt = get(gca, 'xtick');
+
+% Place the text labels
+t = text(Xt,Yl(1)*ones(1,length(Xt)),tick_labels);
+set(t,'HorizontalAlignment','right','VerticalAlignment','top', 'Rotation', angle);
+
+% Remove the default labels
+set(gca,'XTickLabel','')
+
+% Get the Extent of each text object. This
+% loop is unavoidable.
+for i = 1:length(t)
+  ext(i,:) = get(t(i),'Extent');
+end
+
+% Determine the lowest point. The X-label will be
+% placed so that the top is aligned with this point.
+LowYPoint = min(ext(:,2));
+
+% Place the axis label at this point
+if ~isempty(axis_label)
+  Xl = get(gca, 'Xlim');
+  XMidPoint = Xl(1)+abs(diff(Xl))/2;
+  tl = text(XMidPoint,LowYPoint, axis_label, 'VerticalAlignment','top', ...
+      'HorizontalAlignment','center');
+end
diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/set_xtick_label_demo.m
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolboxes/FullBNT-1.0.7/KPMtools/set_xtick_label_demo.m Fri Aug 19 13:07:06 2016 +0200
@@ -0,0 +1,52 @@
+
+% Generate some test data. Assume that the X-axis represents months.
+x = 1:12;
+y = 10*rand(1,length(x));
+
+% Plot the data.
+h = plot(x,y,'+');
+
+% Add a title.
+title('This is a title')
+
+% Set the X-Tick locations so that every other month is labeled.
+Xt = 1:2:11;
+Xl = [1 12];
+set(gca,'XTick',Xt,'XLim',Xl);
+
+% Add the months as tick labels.
+months = ['Jan';
+          'Feb';
+          'Mar';
+          'Apr';
+          'May';
+          'Jun';
+          'Jul';
+          'Aug';
+          'Sep';
+          'Oct';
+          'Nov';
+          'Dec'];
+
+set_xtick_label(months(1:2:12, :), 90, 'xaxis label');
+
+
+
+if 0
+
+
+% Generate some test data. Assume that the X-axis represents months.
+x = 1:8;
+y = 10*rand(1,length(x));
+
+% Plot the data.
+h = plot(x,y,'+'); + +S = subsets(1:3); +str = cell(1,8); +for i=1:2^3 + str{i} = num2str(S{i}); +end +set_xtick_label(str); + +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/setdiag.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/setdiag.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,21 @@ +function M = setdiag(M, v) +% SETDIAG Set the diagonal of a matrix to a specified scalar/vector. +% M = set_diag(M, v) + +n = length(M); +if length(v)==1 + v = repmat(v, 1, n); +end + +% e.g., for 3x3 matrix, elements are numbered +% 1 4 7 +% 2 5 8 +% 3 6 9 +% so diagnoal = [1 5 9] + + +J = 1:n+1:n^2; +M(J) = v; + +%M = triu(M,1) + tril(M,-1) + diag(v); + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/softeye.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/softeye.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,11 @@ +function M = softeye(K, p) +% SOFTEYE Make a stochastic matrix with p on the diagonal, and the remaining mass distributed uniformly +% M = softeye(K, p) +% +% M is a K x K matrix. + +M = p*eye(K); +q = 1-p; +for i=1:K + M(i, [1:i-1 i+1:K]) = q/(K-1); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/sort_evec.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/sort_evec.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,22 @@ +function [evec, evals] = sort_evec(temp_evec, temp_evals, N) + +if ~isvectorBNT(temp_evals) + temp_evals = diag(temp_evals); +end + +% Eigenvalues nearly always returned in descending order, but just +% to make sure..... +[evals perm] = sort(-temp_evals); +evals = -evals(1:N); +if evals == temp_evals(1:N) + % Originals were in order + evec = temp_evec(:, 1:N); + return +else + fprintf('sorting evec\n'); + % Need to reorder the eigenvectors + for i=1:N + evec(:,i) = temp_evec(:,perm(i)); + end +end + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/splitLongSeqIntoManyShort.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/splitLongSeqIntoManyShort.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,16 @@ +function short = splitLongSeqIntoManyShort(long, Tsmall) +% splitLongSeqIntoManyShort Put groups of columns into a cell array of narrower matrices +% function short = splitLongSeqIntoManyShort(long, Tsmall) +% +% long(:,t) +% short{i} = long(:,ndx1:ndx2) where each segment (except maybe the last) is of length Tsmall + +T = length(long); +Nsmall = ceil(T/Tsmall); +short = cell(Nsmall,1); + +t = 1; +for i=1:Nsmall + short{i} = long(:,t:min(T,t+Tsmall-1)); + t = t+Tsmall; +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/sprintf_intvec.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/sprintf_intvec.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,9 @@ +function s = sprintf_intvec(v) +% SPRINTF_INTVEC Print a vector of ints as comma separated string, with no trailing comma +% function s = sprintf_intvec(v) +% +% e.g., sprintf_intvec(1:3) returns '1,2,3' + +s = sprintf('%d,', v); +s = s(1:end-1); + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/sqdist.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/sqdist.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,29 @@ +function m = sqdist(p, q, A) +% SQDIST Squared Euclidean or Mahalanobis distance. +% SQDIST(p,q) returns m(i,j) = (p(:,i) - q(:,j))'*(p(:,i) - q(:,j)). 
+% SQDIST(p,q,A) returns m(i,j) = (p(:,i) - q(:,j))'*A*(p(:,i) - q(:,j)). + +% From Tom Minka's lightspeed toolbox + +[d, pn] = size(p); +[d, qn] = size(q); + +if nargin == 2 + + pmag = sum(p .* p, 1); + qmag = sum(q .* q, 1); + m = repmat(qmag, pn, 1) + repmat(pmag', 1, qn) - 2*p'*q; + %m = ones(pn,1)*qmag + pmag'*ones(1,qn) - 2*p'*q; + +else + + if isempty(A) | isempty(p) + error('sqdist: empty matrices'); + end + Ap = A*p; + Aq = A*q; + pmag = sum(p .* Ap, 1); + qmag = sum(q .* Aq, 1); + m = repmat(qmag, pn, 1) + repmat(pmag', 1, qn) - 2*p'*Aq; + +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/strmatch_multi.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/strmatch_multi.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,30 @@ +function [posns] = strmatch_multi(keys, strs) +% STRMATCH_MULTI Find where each key occurs in list of strings. +% [pos] = strmatch_multi(key, strs) where key is a string and strs is a cell array of strings +% works like the built-in command sequence pos = strmatch(key, strs, 'exact'), +% except that pos is the first occurrence of key in strs; if there is no occurence, pos is 0. +% +% [posns] = strmatch_multi(keys, strs), where keys is a cell array of strings, +% matches each element of keys. It loops over whichever is shorter, keys or strs. + +if ~iscell(keys), keys = {keys}; end +nkeys = length(keys); +posns = zeros(1, nkeys); +if length(keys) < length(strs) + for i=1:nkeys + %pos = strmatch(keys{i}, strs, 'exact'); + ndx = strcmp(keys{i}, strs); % faster + pos = find(ndx); + if ~isempty(pos) + posns(i) = pos(1); + end + end +else + for s=1:length(strs) + %ndx = strmatch(strs{s}, keys, 'exact'); + ndx = strcmp(strs{s}, keys); + ndx = find(ndx); + posns(ndx) = s; + end +end + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/strmatch_substr.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/strmatch_substr.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,28 @@ +function ndx = strmatch_substr(str, strs) +% STRMATCH_SUBSTR Like strmatch, except str can match any part of strs{i}, not just prefix. +% ndx = strmatch_substr(str, strs) +% +% Example: +% i = strmatch('max', {'max','minimax','maximum'}) +% returns i = [1; 3] since only 1 and 3 begin with max, but +% i = strmatch_substr('max', {'max','minimax','maximum'}) +% returns i = [1;2;3]; +% +% If str is also a cell array, it is like calling strmatch_substr several times +% and concatenating the results. +% Example: +% +% i = strmatch_substr({'foo', 'dog'}, {'foo', 'hoofoo', 'dog'}) +% returns i = [1;2;3] + +ndx = []; +if ~iscell(str), str = {str}; end +for j=1:length(str) + for i=1:length(strs) + %ind = strfind(strs{i}, str{j}); % not supported in 6.0 + ind = findstr(strs{i}, str{j}); + if ~isempty(ind) + ndx = [ndx; i]; + end + end +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/strsplit.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/strsplit.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,53 @@ +function parts = strsplit(splitstr, str, option) +%STRSPLIT Split string into pieces. +% +% STRSPLIT(SPLITSTR, STR, OPTION) splits the string STR at every occurrence +% of SPLITSTR and returns the result as a cell array of strings. By default, +% SPLITSTR is not included in the output. +% +% STRSPLIT(SPLITSTR, STR, OPTION) can be used to control how SPLITSTR is +% included in the output. 
If OPTION is 'include', SPLITSTR will be included +% as a separate string. If OPTION is 'append', SPLITSTR will be appended to +% each output string, as if the input string was split at the position right +% after the occurrence SPLITSTR. If OPTION is 'omit', SPLITSTR will not be +% included in the output. + +% Author: Peter J. Acklam +% Time-stamp: 2004-09-22 08:48:01 +0200 +% E-mail: pjacklam@online.no +% URL: http://home.online.no/~pjacklam + + nargsin = nargin; + error(nargchk(2, 3, nargsin)); + if nargsin < 3 + option = 'omit'; + else + option = lower(option); + end + + splitlen = length(splitstr); + parts = {}; + + while 1 + + k = strfind(str, splitstr); + if isempty(k) + parts{end+1} = str; + break + end + + switch option + case 'include' + parts(end+1:end+2) = {str(1:k(1)-1), splitstr}; + case 'append' + parts{end+1} = str(1 : k(1)+splitlen-1); + case 'omit' + parts{end+1} = str(1 : k(1)-1); + otherwise + error(['Invalid option string -- ', option]); + end + + + str = str(k(1)+splitlen : end); + + end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/subplot2.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/subplot2.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,17 @@ +function subplot2(nrows, ncols, i, j) +% function subplot2(nrows, ncols, i, j) + + +sz = [nrows ncols]; +%k = sub2ind(sz, i, j) +k = sub2ind(sz(end:-1:1), j, i); +subplot(nrows, ncols, k); + +if 0 + ncols_plot = ceil(sqrt(Nplots)); + nrows_plot = ceil(Nplots/ncols_plot); + Nplots = nrows_plot*ncols_plot; + for p=1:Nplots + subplot(nrows_plot, ncols_plot, p); + end +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/subplot3.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/subplot3.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,13 @@ +function fignum = subplot3(nrows, ncols, fignumBase, plotnumBase) +% function subplot3(nrows, ncols, fignumBase, plotnumBase) +% Choose a subplot number, opening a new figure if necessary +% eg nrows=2, ncols = 2, we plot on (fignum, plotnum) = (1,1), (1,2), (1,3), (1,4), (2,1), ... + +nplotsPerFig = nrows*ncols; +fignum = fignumBase + div(plotnumBase-1, nplotsPerFig); +plotnum = wrap(plotnumBase, nplotsPerFig); +figure(fignum); +if plotnum==1, clf; end +subplot(nrows, ncols, plotnum); + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/subsets.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/subsets.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,58 @@ +function [T, bitv] = subsets(S, U, L, sorted, N) +% SUBSETS Create a set of all the subsets of S which have cardinality <= U and >= L +% T = subsets(S, U, L) +% U defaults to length(S), L defaults to 0. +% So subsets(S) generates the powerset of S. +% +% Example: +% T = subsets(1:4, 2, 1) +% T{:} = 1, 2, [1 2], 3, [1 3], [2 3], 4, [1 4], [2 4], [3 4] +% +% T = subsets(S, U, L, sorted) +% If sorted=1, return the subsets in increasing size +% +% Example: +% T = subsets(1:4, 2, 1, 1) +% T{:} = 1, 2, 3, 4, [1 2], [1 3], [2 3], [1 4], [2 4], [3 4] +% +% [T, bitv] = subsets(S, U, L, sorted, N) +% Row i of bitv is a bit vector representation of T{i}, +% where bitv has N columns (representing 1:N). +% N defaults to max(S). 
+% +% Example: +% [T,bitv] = subsets(2:4, 2^3, 0, 0, 5) +% T{:} = [], 2, 3, [2 3], 4, [2 4], [3 4], [2 3 4] +% bitv= +% 0 0 0 0 0 +% 0 1 0 0 0 +% 0 0 1 0 0 +% 0 1 1 0 0 +% 0 0 0 1 0 +% 0 1 0 1 0 +% 0 0 1 1 0 +% 0 1 1 1 0 + +n = length(S); + +if nargin < 2, U = n; end +if nargin < 3, L = 0; end +if nargin < 4, sorted = 0; end +if nargin < 5, N = max(S); end + +bits = ind2subv(2*ones(1,n), 1:2^n)-1; +sm = sum(bits,2); +masks = bits((sm <= U) & (sm >= L), :); +m = size(masks, 1); +T = cell(1, m); +for i=1:m + s = S(find(masks(i,:))); + T{i} = s; +end + +if sorted + T = sortcell(T); +end + +bitv = zeros(m, N); +bitv(:, S) = masks; diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/subsets1.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/subsets1.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,45 @@ +function sub_s=subsets1(s,k) +% SUBSETS1 creates sub-sets of a specific from a given set +% SS = subsets1(S, k) +% +% S is the given set +% k is the required sub-sets size +% +% Example: +% +% >> ss=subsets1([1:4],3); +% >> ss{:} +% ans = +% 1 2 3 +% ans = +% 1 2 4 +% ans = +% 1 3 4 +% ans = +% 2 3 4 +% +% Written by Raanan Yehezkel, 2004 + +if k<0 % special case + error('subset size must be positive'); +elseif k==0 % special case + sub_s={[]}; +else + l=length(s); + ss={}; + if l>=k + if k==1 % Exit condition + for I=1:l + ss{I}=s(I); + end + else + for I=1:l + ss1=subsets1(s([(I+1):l]),k-1); + for J=1:length(ss1) + ss{end+1}=[s(I),ss1{J}]; + end + end + end + end + sub_s=ss; +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/subsetsFixedSize.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/subsetsFixedSize.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,45 @@ +function sub_s=subsets1(s,k) +% SUBSETS1 creates sub-sets of a specific size from a given set +% SS = subsets1(S, k) +% +% S is the given set +% k is the required sub-sets size +% +% Example: +% +% >> ss=subsets1([1:4],3); +% >> ss{:} +% ans = +% 1 2 3 +% ans = +% 1 2 4 +% ans = +% 1 3 4 +% ans = +% 2 3 4 +% +% Written by Raanan Yehezkel, 2004 + +if k<0 % special case + error('subset size must be positive'); +elseif k==0 % special case + sub_s={[]}; +else + l=length(s); + ss={}; + if l>=k + if k==1 % Exit condition + for I=1:l + ss{I}=s(I); + end + else + for I=1:l + ss1=subsets1(s([(I+1):l]),k-1); + for J=1:length(ss1) + ss{end+1}=[s(I),ss1{J}]; + end + end + end + end + sub_s=ss; +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/subv2ind.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/subv2ind.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,17 @@ +function index = subv2ind(siz,sub) +%SUBV2IND Linear index from subscript vector. +% SUBV2IND(SIZ,SUB) returns an equivalent single index corresponding to a +% subscript vector for an array of size SIZ. +% If SUB is a matrix, with subscript vectors as rows, then the result is a +% column vector. +% +% This is the opposite of IND2SUBV, so that +% SUBV2IND(SIZ,IND2SUBV(SIZ,IND)) == IND. +% +% See also IND2SUBV, SUB2IND. 
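+%
+% Worked check (editorial illustration, consistent with the built-in SUB2IND):
+%   subv2ind([3 4], [2 3]) returns 8, since sub2ind([3 4], 2, 3) = 2 + (3-1)*3 = 8
+%   subv2ind([2 2 2], [1 1 1; 2 2 2]) returns [1; 8]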
+ +%index = subv2indTest(siz,sub); +prev_cum_size = [1 cumprod(siz(1:end-1))]; +%index = (sub-1)*prev_cum_size' + 1; +index = sub*prev_cum_size' - sum(prev_cum_size) + 1; + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/subv2indKPM.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/subv2indKPM.c Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,89 @@ +/* C mex version of subv2ind*/ +/* 2 inputs, 1 output */ +/* siz, subv */ +/* ndx */ +#include "mex.h" + +void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){ + int i, j, k, nCol, nRow, binary, temp; + double *pSize, *pSubv, *pr; + int *cumprod; + + pSize = mxGetPr(prhs[0]); + pSubv = mxGetPr(prhs[1]); + nCol = mxGetNumberOfElements(prhs[0]); + nRow = mxGetM(prhs[1]); + + + if(mxIsEmpty(prhs[1])){ + plhs[0] = mxCreateDoubleMatrix(0, 0, mxREAL); + return; + } + + if(mxIsEmpty(prhs[0])){ + plhs[0] = mxCreateDoubleMatrix(1, 1, mxREAL); + *mxGetPr(plhs[0]) = 1; + return; + } + + binary = 2; + for (i = 0; i < nCol; i++){ + if (pSize[i] > 2.0){ + binary = 0; + break; + } + else if(pSize[i] == 1.0){ + binary = 1; + } + } + + plhs[0] = mxCreateDoubleMatrix(nRow, 1, mxREAL); + pr = mxGetPr(plhs[0]); + for(i=0; i max_y), max_y=pos(4)+pos(2)+ff/5*2;end; + else, + oldtitle = h(i); + end +end + +if max_y > plotregion, + scale = (plotregion-min_y)/(max_y-min_y); + for i=1:length(h), + pos = get(h(i),'position'); + pos(2) = (pos(2)-min_y)*scale+min_y; + pos(4) = pos(4)*scale-(1-scale)*ff/5*3; + set(h(i),'position',pos); + end +end + +np = get(gcf,'nextplot'); +set(gcf,'nextplot','add'); +if (oldtitle), + delete(oldtitle); +end +ha=axes('pos',[0 1 1 1],'visible','off','Tag','suptitle'); +ht=text(.5,titleypos-1,str);set(ht,'horizontalalignment','center','fontsize',fs); +set(gcf,'nextplot',np); +axes(haold); +if nargout, + hout=ht; +end + + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/unaryEncoding.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/unaryEncoding.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,16 @@ +function U = unaryEncoding(data, K) +% unaryEncoding Encode data(s) as a 1-of-K column vector +% function U = unaryEncoding(data, K) +% +% eg. +% If data = [3 2 2] and K=3, +% then U = [0 0 0 +% 0 1 1 +% 1 0 0] + +if nargin < 2, K = max(data); end +N = length(data); +U = zeros(K,N); +ndx = subv2ind([K N], [data(:)'; 1:N]'); +U(ndx) = 1; +U = reshape(U, [K N]); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/wrap.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/wrap.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,7 @@ +function v = wrap(u,N) +% WRAP Wrap a vector of indices around a torus. 
+% v = wrap(u,N) +% +% e.g., wrap([-1 0 1 2 3 4], 3) = 2 3 1 2 3 1 + +v = mod(u-1,N)+1; diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/xticklabel_rotate90.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/xticklabel_rotate90.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,68 @@ +function xticklabel_rotate90(XTick,varargin) +%XTICKLABEL_ROTATE90 - Rotate numeric Xtick labels by 90 degrees +% +% Syntax: xticklabel_rotate90(XTick) +% +% Input: XTick - vector array of XTick positions & values (numeric) +% +% Output: none +% +% Example 1: Set the positions of the XTicks and rotate them +% figure; plot([1960:2004],randn(45,1)); xlim([1960 2004]); +% xticklabel_rotate90([1960:2:2004]); +% %If you wish, you may set a few text "Property-value" pairs +% xticklabel_rotate90([1960:2:2004],'Color','m','Fontweight','bold'); +% +% Example 2: %Rotate XTickLabels at their current position +% XTick = get(gca,'XTick'); +% xticklabel_rotate90(XTick); +% +% Other m-files required: none +% Subfunctions: none +% MAT-files required: none +% +% See also: TEXT, SET + +% Author: Denis Gilbert, Ph.D., physical oceanography +% Maurice Lamontagne Institute, Dept. of Fisheries and Oceans Canada +% email: gilbertd@dfo-mpo.gc.ca Web: http://www.qc.dfo-mpo.gc.ca/iml/ +% February 1998; Last revision: 24-Mar-2003 + +if ~isnumeric(XTick) + error('XTICKLABEL_ROTATE90 requires a numeric input argument'); +end + +%Make sure XTick is a column vector +XTick = XTick(:); + +%Set the Xtick locations and set XTicklabel to an empty string +set(gca,'XTick',XTick,'XTickLabel','') + +% Define the xtickLabels +xTickLabels = num2str(XTick); + +% Determine the location of the labels based on the position +% of the xlabel +hxLabel = get(gca,'XLabel'); % Handle to xlabel +xLabelString = get(hxLabel,'String'); + +if ~isempty(xLabelString) + warning('You may need to manually reset the XLABEL vertical position') +end + +set(hxLabel,'Units','data'); +xLabelPosition = get(hxLabel,'Position'); +y = xLabelPosition(2); + +%CODE below was modified following suggestions from Urs Schwarz +y=repmat(y,size(XTick,1),1); +% retrieve current axis' fontsize +fs = get(gca,'fontsize'); + +% Place the new xTickLabels by creating TEXT objects +hText = text(XTick, y, xTickLabels,'fontsize',fs); + +% Rotate the text objects by 90 degrees +set(hText,'Rotation',90,'HorizontalAlignment','right',varargin{:}) + +%------------- END OF CODE -------------- diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/zipload.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/zipload.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,47 @@ +%ZIPLOAD Load compressed data file created with ZIPSAVE +% +% [data] = zipload( filename ) +% filename: string variable that contains the name of the +% compressed file (do not include '.zip' extension) +% Use only with files created with 'zipsave' +% pkzip25.exe has to be in the matlab path. This file is a compression utility +% made by Pkware, Inc. It can be dowloaded from: http://www.pkware.com +% Or directly from ftp://ftp.pkware.com/pk250c32.exe, for the Windows 95/NT version. +% This function was tested using 'PKZIP 2.50 Command Line for Windows 9x/NT' +% It is important to use version 2.5 of the utility. Otherwise the command line below +% has to be changed to include the proper options of the compression utility you +% wish to use. +% This function was tested in MATLAB Version 5.3 under Windows NT. +% Fernando A. 
Brucher - May/25/1999 +% +% Example: +% [loadedData] = zipload('testfile'); +%-------------------------------------------------------------------- + +function [data] = zipload( filename ) + +%--- Decompress data file by calling pkzip (comand line command) --- +% Options used: +% 'extract' = decompress file +% 'silent' = no console output +% 'over=all' = overwrite files + +%eval( ['!pkzip25 -extract -silent -over=all ', filename, '.zip'] ) +eval( ['!pkzip25 -extract -silent -over=all ', filename, '.zip'] ) + + +%--- Load data from decompressed file --- +% try, catch takes care of cases when pkzip fails to decompress a +% valid matlab format file + +try + tmpStruc = load( filename ); + data = tmpStruc.data; +catch, return, end + + +%--- Delete decompressed file --- + +delete( [filename,'.mat'] ) + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/KPMtools/zipsave.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/KPMtools/zipsave.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,42 @@ +%ZIPSAVE Save data in compressed format +% +% zipsave( filename, data ) +% filename: string variable that contains the name of the resulting +% compressed file (do not include '.zip' extension) +% pkzip25.exe has to be in the matlab path. This file is a compression utility +% made by Pkware, Inc. It can be dowloaded from: http://www.pkware.com +% This function was tested using 'PKZIP 2.50 Command Line for Windows 9x/NT' +% It is important to use version 2.5 of the utility. Otherwise the command line below +% has to be changed to include the proper options of the compression utility you +% wish to use. +% This function was tested in MATLAB Version 5.3 under Windows NT. +% Fernando A. Brucher - May/25/1999 +% +% Example: +% testData = [1 2 3; 4 5 6; 7 8 9]; +% zipsave('testfile', testData); +% +% Modified by Kevin Murphy, 26 Feb 2004, to use winzip +%------------------------------------------------------------------------ + +function zipsave( filename, data ) + +%--- Save data in a temporary file in matlab format (.mat)--- + +eval( ['save ''', filename, ''' data'] ) + + +%--- Compress data by calling pkzip (comand line command) --- +% Options used: +% 'add' = add compressed files to the resulting zip file +% 'silent' = no console output +% 'over=all' = overwrite files + +%eval( ['!pkzip25 -silent -add -over=all ', filename, '.zip ', filename,'.mat'] ) +eval( ['!zip ', filename, '.zip ', filename,'.mat'] ) + +%--- Delete temporary matlab format file --- + +delete( [filename,'.mat'] ) + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/Kalman/AR_to_SS.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/Kalman/AR_to_SS.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,39 @@ +function [F,H,Q,R,initx, initV] = AR_to_SS(coef, C, y) +% +% Convert a vector auto-regressive model of order k to state-space form. +% [F,H,Q,R] = AR_to_SS(coef, C, y) +% +% X(i) = A(1) X(i-1) + ... + A(k) X(i-k+1) + v, where v ~ N(0, C) +% and A(i) = coef(:,:,i) is the weight matrix for i steps ago. +% We initialize the state vector with [y(:,k)' ... y(:,1)']', since +% the state vector stores [X(i) ... X(i-k+1)]' in order. 
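+%
+% Editorial sketch of the resulting block structure for k=2 (s = state size):
+%
+%   F = [ A(1) A(2) ]    H = [ I 0 ]    Q = [ C 0 ]    R = 0
+%       [  I    0   ]                       [ 0 0 ]
+%
+% so the top block evolves as X(i) = A(1) X(i-1) + A(2) X(i-2) + v, and the
+% identity block simply shifts the lagged state down by one position.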
+ +[s s2 k] = size(coef); % s is the size of the state vector +bs = s * ones(1,k); % size of each block + +F = zeros(s*k); +for i=1:k + F(block(1,bs), block(i,bs)) = coef(:,:,i); +end +for i=1:k-1 + F(block(i+1,bs), block(i,bs)) = eye(s); +end + +H = zeros(1*s, k*s); +% we get to see the most recent component of the state vector +H(block(1,bs), block(1,bs)) = eye(s); +%for i=1:k +% H(block(1,bs), block(i,bs)) = eye(s); +%end + +Q = zeros(k*s); +Q(block(1,bs), block(1,bs)) = C; + +R = zeros(s); + +initx = zeros(k*s, 1); +for i=1:k + initx(block(i,bs)) = y(:, k-i+1); % concatenate the first k observation vectors +end + +initV = zeros(k*s); % no uncertainty about the state (since perfectly observable) diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/Kalman/CVS/Entries --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/Kalman/CVS/Entries Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,19 @@ +/AR_to_SS.m/1.1.1.1/Wed May 29 15:59:56 2002// +/README.txt/1.1.1.1/Mon Jun 7 14:39:28 2004// +/SS_to_AR.m/1.1.1.1/Wed May 29 15:59:56 2002// +/convert_to_lagged_form.m/1.1.1.1/Wed May 29 15:59:56 2002// +/ensure_AR.m/1.1.1.1/Wed May 29 15:59:56 2002// +/eval_AR_perf.m/1.1.1.1/Wed May 29 15:59:56 2002// +/kalman_filter.m/1.1.1.1/Wed May 29 15:59:56 2002// +/kalman_forward_backward.m/1.1.1.1/Sat Nov 2 00:32:36 2002// +/kalman_smoother.m/1.1.1.1/Wed May 29 15:59:56 2002// +/kalman_update.m/1.1.1.1/Wed May 29 15:59:56 2002// +/learn_AR.m/1.1.1.1/Wed May 29 15:59:56 2002// +/learn_AR_diagonal.m/1.1.1.1/Wed May 29 15:59:56 2002// +/learn_kalman.m/1.1.1.1/Wed May 29 15:59:56 2002// +/learning_demo.m/1.1.1.1/Wed Oct 23 15:17:42 2002// +/sample_lds.m/1.1.1.1/Fri Jan 24 19:36:02 2003// +/smooth_update.m/1.1.1.1/Wed May 29 15:59:56 2002// +/testKalman.m/1.1.1.1/Thu Jun 9 01:56:34 2005// +/tracking_demo.m/1.1.1.1/Sat Jan 18 22:49:22 2003// +D diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/Kalman/CVS/Repository --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/Kalman/CVS/Repository Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +FullBNT/Kalman diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/Kalman/CVS/Root --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/Kalman/CVS/Root Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/Kalman/README.txt --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/Kalman/README.txt Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,17 @@ +Kalman filter toolbox written by Kevin Murphy, 1998. +See http://www.ai.mit.edu/~murphyk/Software/kalman.html for details. + +Installation +------------ + +1. Install KPMtools from http://www.ai.mit.edu/~murphyk/Software/KPMtools.html +3. Assuming you installed all these files in your matlab directory, In Matlab type + +addpath matlab/KPMtools +addpath matlab/Kalman + + +Demos +----- +See tracking_demo.m for a demo of 2D tracking. +See learning_demo.m for a demo of parameter estimation using EM. diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/Kalman/SS_to_AR.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/Kalman/SS_to_AR.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,22 @@ +function [coef, C] = SS_to_AR(F, Q, k, diagonal) +% +% Extract the parameters of a vector autoregresssive process of order k from the state-space form. 
+% [coef, C] = SS_to_AR(F, Q, k, diagonal) + +if nargin<4, diagonal = 0; end + +s = length(Q) / k; +bs = s*ones(1,k); +coef = zeros(s,s,k); +for i=1:k + if diagonal + coef(:,:,i) = diag(diag(F(block(1,bs), block(i,bs)))); + else + coef(:,:,i) = F(block(1,bs), block(i,bs)); + end +end +C = Q(block(1,bs), block(1,bs)); +if diagonal + C = diag(diag(C)); +end +%C = sqrt(Q(block(1,bs), block(1,bs))); % since cov(1,1) of full vector = C C' diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/Kalman/convert_to_lagged_form.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/Kalman/convert_to_lagged_form.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,14 @@ +function yy = convert_to_lagged_form(y, k) +% Create an observation vector yy(:,t) containing the last k values of y, newest first +% e.g., k=2, y = (a1 a2 a3) yy = a2 a3 +% (b1 b2 b3) b2 b2 +% a1 a2 +% b1 b2 + +[s T] = size(y); +bs = s*ones(1,k); +yy = zeros(k*s, T-k+1); +for i=1:k + yy(block(i,bs), :) = y(:, k-i+1:end-i+1); +end + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/Kalman/ensure_AR.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/Kalman/ensure_AR.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,10 @@ +function [A, C, Q, R, initx, initV] = ensure_AR(A, C, Q, R, initx, initV, k, obs, diagonal) +% +% Ensure that the system matrices have the right form for an autoregressive process. + +ss = length(A); +if nargin<8, obs=ones(ss, 1); end +if nargin<9, diagonal=0; end + +[coef, C] = SS_to_AR(A, Q, k, diagonal); +[A, C, Q, R, initx, initV] = AR_to_SS(coef, C, obs); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/Kalman/eval_AR_perf.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/Kalman/eval_AR_perf.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,40 @@ +function [ypred, ll, mse] = eval_AR_perf(coef, C, y, model) +% Evaluate the performance of an AR model. +% +% Inputs +% coef(:,:,k,m) - coef. matrix to use for k steps back, model m +% C(:,:,m) - cov. matrix for model m +% y(:,t) - observation at time t +% model(t) - which model to use at time t (defaults to 1 if not specified) +% +% Outputs +% ypred(:,t) - the predicted value of y at t based on the evidence thru t-1. +% ll - log likelihood +% mse - mean squared error = sum_t d_t . d_t, where d_t = pred(y_t) - y(t) + +[s T] = size(y); +k = size(coef, 3); +M = size(coef, 4); + +if nargin<4, model = ones(1, T); end + +ypred = zeros(s, T); +ypred(:, 1:k) = y(:, 1:k); +mse = 0; +ll = 0; +for j=1:M + c(j) = log(normal_coef(C(:,:,j))); + invC(:,:,j) = inv(C(:,:,j)); +end +coef = reshape(coef, [s s*k M]); + +for t=k+1:T + m = model(t-k); + past = y(:,t-1:-1:t-k); + ypred(:,t) = coef(:, :, m) * past(:); + d = ypred(:,t) - y(:,t); + mse = mse + d' * d; + ll = ll + c(m) - 0.5*(d' * invC(:,:,m) * d); +end +mse = mse / (T-k+1); + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/Kalman/kalman_filter.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/Kalman/kalman_filter.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,101 @@ +function [x, V, VV, loglik] = kalman_filter(y, A, C, Q, R, init_x, init_V, varargin) +% Kalman filter. +% [x, V, VV, loglik] = kalman_filter(y, A, C, Q, R, init_x, init_V, ...) 
+% +% INPUTS: +% y(:,t) - the observation at time t +% A - the system matrix +% C - the observation matrix +% Q - the system covariance +% R - the observation covariance +% init_x - the initial state (column) vector +% init_V - the initial state covariance +% +% OPTIONAL INPUTS (string/value pairs [default in brackets]) +% 'model' - model(t)=m means use params from model m at time t [ones(1,T) ] +% In this case, all the above matrices take an additional final dimension, +% i.e., A(:,:,m), C(:,:,m), Q(:,:,m), R(:,:,m). +% However, init_x and init_V are independent of model(1). +% 'u' - u(:,t) the control signal at time t [ [] ] +% 'B' - B(:,:,m) the input regression matrix for model m +% +% OUTPUTS (where X is the hidden state being estimated) +% x(:,t) = E[X(:,t) | y(:,1:t)] +% V(:,:,t) = Cov[X(:,t) | y(:,1:t)] +% VV(:,:,t) = Cov[X(:,t), X(:,t-1) | y(:,1:t)] t >= 2 +% loglik = sum{t=1}^T log P(y(:,t)) +% +% If an input signal is specified, we also condition on it: +% e.g., x(:,t) = E[X(:,t) | y(:,1:t), u(:, 1:t)] +% If a model sequence is specified, we also condition on it: +% e.g., x(:,t) = E[X(:,t) | y(:,1:t), u(:, 1:t), m(1:t)] + +[os T] = size(y); +ss = size(A,1); % size of state space + +% set default params +model = ones(1,T); +u = []; +B = []; +ndx = []; + +args = varargin; +nargs = length(args); +for i=1:2:nargs + switch args{i} + case 'model', model = args{i+1}; + case 'u', u = args{i+1}; + case 'B', B = args{i+1}; + case 'ndx', ndx = args{i+1}; + otherwise, error(['unrecognized argument ' args{i}]) + end +end + +x = zeros(ss, T); +V = zeros(ss, ss, T); +VV = zeros(ss, ss, T); + +loglik = 0; +for t=1:T + m = model(t); + if t==1 + %prevx = init_x(:,m); + %prevV = init_V(:,:,m); + prevx = init_x; + prevV = init_V; + initial = 1; + else + prevx = x(:,t-1); + prevV = V(:,:,t-1); + initial = 0; + end + if isempty(u) + [x(:,t), V(:,:,t), LL, VV(:,:,t)] = ... + kalman_update(A(:,:,m), C(:,:,m), Q(:,:,m), R(:,:,m), y(:,t), prevx, prevV, 'initial', initial); + else + if isempty(ndx) + [x(:,t), V(:,:,t), LL, VV(:,:,t)] = ... + kalman_update(A(:,:,m), C(:,:,m), Q(:,:,m), R(:,:,m), y(:,t), prevx, prevV, ... + 'initial', initial, 'u', u(:,t), 'B', B(:,:,m)); + else + i = ndx{t}; + % copy over all elements; only some will get updated + x(:,t) = prevx; + prevP = inv(prevV); + prevPsmall = prevP(i,i); + prevVsmall = inv(prevPsmall); + [x(i,t), smallV, LL, VV(i,i,t)] = ... + kalman_update(A(i,i,m), C(:,i,m), Q(i,i,m), R(:,:,m), y(:,t), prevx(i), prevVsmall, ... 
+ 'initial', initial, 'u', u(:,t), 'B', B(i,:,m)); + smallP = inv(smallV); + prevP(i,i) = smallP; + V(:,:,t) = inv(prevP); + end + end + loglik = loglik + LL; +end + + + + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/Kalman/kalman_forward_backward.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/Kalman/kalman_forward_backward.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,97 @@ +% KALMAN_FORWARD_BACKWARD Forward Backward Propogation in Information Form +% +% +% Note : +% +% M file accompanying my technical note +% +% A Technique for Painless Derivation of Kalman Filtering Recursions +% +% available from http://www.mbfys.kun.nl/~cemgil/papers/painless-kalman.ps +% + +% Uses : + +% Change History : +% Date Time Prog Note +% 07-Jun-2001 2:24 PM ATC Created under MATLAB 5.3.1.29215a (R11.1) + +% ATC = Ali Taylan Cemgil, +% SNN - University of Nijmegen, Department of Medical Physics and Biophysics +% e-mail : cemgil@mbfys.kun.nl + +A = [1 1;0 1]; +C = [1 0]; +Q = eye(2)*0.01^2; +R = 0.001^2; +mu1 = [0;1]; +P1 = 3*Q; + +inv_Q = inv(Q); +inv_R = inv(R); + +y = [0 1.1 2 2.95 3.78]; + +T = length(y); +L = size(Q,1); + +%%%%% Forward message Passing +h_f = zeros(L, T); +K_f = zeros(L, L, T); +g_f = zeros(1, T); +h_f_pre = zeros(L, T); +K_f_pre = zeros(L, L, T); +g_f_pre = zeros(1, T); + + +K_f_pre(:, :, 1) = inv(P1); +h_f_pre(:,1) = K_f_pre(:, :, 1)*mu1; +g_f_pre(1) = -0.5*log(det(2*pi*P1)) - 0.5*mu1'*inv(P1)*mu1; + +for i=1:T, + h_f(:,i) = h_f_pre(:,i) + C'*inv_R*y(:,i); + K_f(:,:,i) = K_f_pre(:,:,i) + C'*inv_R*C; + g_f(i) = g_f_pre(i) -0.5*log(det(2*pi*R)) - 0.5*y(:,i)'*inv_R*y(:,i); + if i1, + M = inv(inv_Q + K_b(:,:,i)); + h_b_post(:,i-1) = A'*inv(Q)*M*h_b(:,i); + K_b_post(:,:,i-1) = A'*inv_Q*(Q - M)*inv_Q*A; + g_b_post(i-1) = g_b(i) -0.5*log(det(2*pi*Q)) + 0.5*log(det(2*pi*M)) + 0.5*h_b(:,i)'*M*h_b(:,i); + end; +end; + + +%%%% Smoothed Estimates + +mu = zeros(size(h_f)); +Sig = zeros(size(K_f)); +g = zeros(size(g_f)); +lalpha = zeros(size(g_f)); + +for i=1:T, + Sig(:,:,i) = inv(K_b_post(:,:,i) + K_f(:,:,i)); + mu(:,i) = Sig(:,:,i)*(h_b_post(:,i) + h_f(:,i)); + g(i) = g_b_post(i) + g_f(:,i); + lalpha(i) = g(i) + 0.5*log(det(2*pi*Sig(:,:,i))) + 0.5*mu(:,i)'*inv(Sig(:,:,i))*mu(:,i); +end; \ No newline at end of file diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/Kalman/kalman_smoother.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/Kalman/kalman_smoother.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,55 @@ +function [xsmooth, Vsmooth, VVsmooth, loglik] = kalman_smoother(y, A, C, Q, R, init_x, init_V, varargin) +% Kalman/RTS smoother. +% [xsmooth, Vsmooth, VVsmooth, loglik] = kalman_smoother(y, A, C, Q, R, init_x, init_V, ...) +% +% The inputs are the same as for kalman_filter. +% The outputs are almost the same, except we condition on y(:, 1:T) (and u(:, 1:T) if specified), +% instead of on y(:, 1:t). + +[os T] = size(y); +ss = length(A); + +% set default params +model = ones(1,T); +u = []; +B = []; + +args = varargin; +nargs = length(args); +for i=1:2:nargs + switch args{i} + case 'model', model = args{i+1}; + case 'u', u = args{i+1}; + case 'B', B = args{i+1}; + otherwise, error(['unrecognized argument ' args{i}]) + end +end + +xsmooth = zeros(ss, T); +Vsmooth = zeros(ss, ss, T); +VVsmooth = zeros(ss, ss, T); + +% Forward pass +[xfilt, Vfilt, VVfilt, loglik] = kalman_filter(y, A, C, Q, R, init_x, init_V, ... 
+ 'model', model, 'u', u, 'B', B); + +% Backward pass +xsmooth(:,T) = xfilt(:,T); +Vsmooth(:,:,T) = Vfilt(:,:,T); +%VVsmooth(:,:,T) = VVfilt(:,:,T); + +for t=T-1:-1:1 + m = model(t+1); + if isempty(B) + [xsmooth(:,t), Vsmooth(:,:,t), VVsmooth(:,:,t+1)] = ... + smooth_update(xsmooth(:,t+1), Vsmooth(:,:,t+1), xfilt(:,t), Vfilt(:,:,t), ... + Vfilt(:,:,t+1), VVfilt(:,:,t+1), A(:,:,m), Q(:,:,m), [], []); + else + [xsmooth(:,t), Vsmooth(:,:,t), VVsmooth(:,:,t+1)] = ... + smooth_update(xsmooth(:,t+1), Vsmooth(:,:,t+1), xfilt(:,t), Vfilt(:,:,t), ... + Vfilt(:,:,t+1), VVfilt(:,:,t+1), A(:,:,m), Q(:,:,m), B(:,:,m), u(:,t+1)); + end +end + +VVsmooth(:,:,1) = zeros(ss,ss); + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/Kalman/kalman_update.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/Kalman/kalman_update.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,71 @@ +function [xnew, Vnew, loglik, VVnew] = kalman_update(A, C, Q, R, y, x, V, varargin) +% KALMAN_UPDATE Do a one step update of the Kalman filter +% [xnew, Vnew, loglik] = kalman_update(A, C, Q, R, y, x, V, ...) +% +% INPUTS: +% A - the system matrix +% C - the observation matrix +% Q - the system covariance +% R - the observation covariance +% y(:) - the observation at time t +% x(:) - E[X | y(:, 1:t-1)] prior mean +% V(:,:) - Cov[X | y(:, 1:t-1)] prior covariance +% +% OPTIONAL INPUTS (string/value pairs [default in brackets]) +% 'initial' - 1 means x and V are taken as initial conditions (so A and Q are ignored) [0] +% 'u' - u(:) the control signal at time t [ [] ] +% 'B' - the input regression matrix +% +% OUTPUTS (where X is the hidden state being estimated) +% xnew(:) = E[ X | y(:, 1:t) ] +% Vnew(:,:) = Var[ X(t) | y(:, 1:t) ] +% VVnew(:,:) = Cov[ X(t), X(t-1) | y(:, 1:t) ] +% loglik = log P(y(:,t) | y(:,1:t-1)) log-likelihood of innovatio + +% set default params +u = []; +B = []; +initial = 0; + +args = varargin; +for i=1:2:length(args) + switch args{i} + case 'u', u = args{i+1}; + case 'B', B = args{i+1}; + case 'initial', initial = args{i+1}; + otherwise, error(['unrecognized argument ' args{i}]) + end +end + +% xpred(:) = E[X_t+1 | y(:, 1:t)] +% Vpred(:,:) = Cov[X_t+1 | y(:, 1:t)] + +if initial + if isempty(u) + xpred = x; + else + xpred = x + B*u; + end + Vpred = V; +else + if isempty(u) + xpred = A*x; + else + xpred = A*x + B*u; + end + Vpred = A*V*A' + Q; +end + +e = y - C*xpred; % error (innovation) +n = length(e); +ss = length(A); +S = C*Vpred*C' + R; +Sinv = inv(S); +ss = length(V); +loglik = gaussian_prob(e, zeros(1,length(e)), S, 1); +K = Vpred*C'*Sinv; % Kalman gain matrix +% If there is no observation vector, set K = zeros(ss). +xnew = xpred + K*e; +Vnew = (eye(ss) - K*C)*Vpred; +VVnew = (eye(ss) - K*C)*A*V; + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/Kalman/learn_AR.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/Kalman/learn_AR.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,30 @@ +function [coef, C] = learn_AR(data, k) +% Find the ML parameters of a vector autoregressive process of order k. +% [coef, C] = learn_AR(k, data) +% data{l}(:,t) = the observations at time t in sequence l + +warning('learn_AR seems to be broken'); + +nex = length(data); +obs = cell(1, nex); +for l=1:nex + obs{l} = convert_to_lagged_form(data{l}, k); +end + +% The initial parameter values don't matter, since this is a perfectly observable problem. +% However, the size of F must be set correctly. 
+y = data{1}; +[s T] = size(y); +coef = rand(s,s,k); +C = rand_psd(s); +[F,H,Q,R,initx,initV] = AR_to_SS(coef, C, y); + +max_iter = 1; +fully_observed = 1; +diagQ = 0; +diagR = 0; +[F, H, Q, R, initx, initV, loglik] = ... + learn_kalman(obs, F, H, Q, R, initx, initV, max_iter, diagQ, diagR, fully_observed); + +[coef, C] = SS_to_AR(F, Q, k); + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/Kalman/learn_AR_diagonal.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/Kalman/learn_AR_diagonal.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,20 @@ +function [coef, C] = learn_AR_diagonal(y, k) +% Find the ML parameters for a collection of independent scalar AR processes. + +% sep_coef(1,1,t,i) is the coefficient to apply to compopnent i of the state vector t steps ago +% eg. consider two components L and R and let A = coef(:,:,1,:), B = coef(:,:,2,:) +% L3 (AL 0 BL 0) (L2) (CL 0 0 0) +% R3 = (0 AR 0 BR) (R2) (0 CR 0 0) +% L2 (1 0 0 0 ) (L1) + (0 0 0 0) +% R2 (0 1 0 0 ) (R1) (0 0 0 0) + +ss = size(y, 1); +sep_coef = zeros(1,1,k,ss); +for i=1:ss + [sep_coef(:,:,:,i), sep_cov(i)] = learn_AR(k, y(i,:)); +end +C = diag(sep_cov); +for t=1:k + x = sep_coef(1,1,t,:); + coef(:,:,t) = diag(x(:)); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/Kalman/learn_kalman.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/Kalman/learn_kalman.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,182 @@ +function [A, C, Q, R, initx, initV, LL] = ... + learn_kalman(data, A, C, Q, R, initx, initV, max_iter, diagQ, diagR, ARmode, constr_fun, varargin) +% LEARN_KALMAN Find the ML parameters of a stochastic Linear Dynamical System using EM. +% +% [A, C, Q, R, INITX, INITV, LL] = LEARN_KALMAN(DATA, A0, C0, Q0, R0, INITX0, INITV0) fits +% the parameters which are defined as follows +% x(t+1) = A*x(t) + w(t), w ~ N(0, Q), x(0) ~ N(init_x, init_V) +% y(t) = C*x(t) + v(t), v ~ N(0, R) +% A0 is the initial value, A is the final value, etc. +% DATA(:,t,l) is the observation vector at time t for sequence l. If the sequences are of +% different lengths, you can pass in a cell array, so DATA{l} is an O*T matrix. +% LL is the "learning curve": a vector of the log lik. values at each iteration. +% LL might go positive, since prob. densities can exceed 1, although this probably +% indicates that something has gone wrong e.g., a variance has collapsed to 0. +% +% There are several optional arguments, that should be passed in the following order. +% LEARN_KALMAN(DATA, A0, C0, Q0, R0, INITX0, INITV0, MAX_ITER, DIAGQ, DIAGR, ARmode) +% MAX_ITER specifies the maximum number of EM iterations (default 10). +% DIAGQ=1 specifies that the Q matrix should be diagonal. (Default 0). +% DIAGR=1 specifies that the R matrix should also be diagonal. (Default 0). +% ARMODE=1 specifies that C=I, R=0. i.e., a Gauss-Markov process. (Default 0). +% This problem has a global MLE. Hence the initial parameter values are not important. +% +% LEARN_KALMAN(DATA, A0, C0, Q0, R0, INITX0, INITV0, MAX_ITER, DIAGQ, DIAGR, F, P1, P2, ...) +% calls [A,C,Q,R,initx,initV] = f(A,C,Q,R,initx,initV,P1,P2,...) after every M step. f can be +% used to enforce any constraints on the params. +% +% For details, see +% - Ghahramani and Hinton, "Parameter Estimation for LDS", U. Toronto tech. report, 1996 +% - Digalakis, Rohlicek and Ostendorf, "ML Estimation of a stochastic linear system with the EM +% algorithm and its application to speech recognition", +% IEEE Trans. 
Speech and Audio Proc., 1(4):431--442, 1993. + + +% learn_kalman(data, A, C, Q, R, initx, initV, max_iter, diagQ, diagR, ARmode, constr_fun, varargin) +if nargin < 8, max_iter = 10; end +if nargin < 9, diagQ = 0; end +if nargin < 10, diagR = 0; end +if nargin < 11, ARmode = 0; end +if nargin < 12, constr_fun = []; end +verbose = 1; +thresh = 1e-4; + + +if ~iscell(data) + N = size(data, 3); + data = num2cell(data, [1 2]); % each elt of the 3rd dim gets its own cell +else + N = length(data); +end + +N = length(data); +ss = size(A, 1); +os = size(C,1); + +alpha = zeros(os, os); +Tsum = 0; +for ex = 1:N + %y = data(:,:,ex); + y = data{ex}; + T = length(y); + Tsum = Tsum + T; + alpha_temp = zeros(os, os); + for t=1:T + alpha_temp = alpha_temp + y(:,t)*y(:,t)'; + end + alpha = alpha + alpha_temp; +end + +previous_loglik = -inf; +loglik = 0; +converged = 0; +num_iter = 1; +LL = []; + +% Convert to inline function as needed. +if ~isempty(constr_fun) + constr_fun = fcnchk(constr_fun,length(varargin)); +end + + +while ~converged & (num_iter <= max_iter) + + %%% E step + + delta = zeros(os, ss); + gamma = zeros(ss, ss); + gamma1 = zeros(ss, ss); + gamma2 = zeros(ss, ss); + beta = zeros(ss, ss); + P1sum = zeros(ss, ss); + x1sum = zeros(ss, 1); + loglik = 0; + + for ex = 1:N + y = data{ex}; + T = length(y); + [beta_t, gamma_t, delta_t, gamma1_t, gamma2_t, x1, V1, loglik_t] = ... + Estep(y, A, C, Q, R, initx, initV, ARmode); + beta = beta + beta_t; + gamma = gamma + gamma_t; + delta = delta + delta_t; + gamma1 = gamma1 + gamma1_t; + gamma2 = gamma2 + gamma2_t; + P1sum = P1sum + V1 + x1*x1'; + x1sum = x1sum + x1; + %fprintf(1, 'example %d, ll/T %5.3f\n', ex, loglik_t/T); + loglik = loglik + loglik_t; + end + LL = [LL loglik]; + if verbose, fprintf(1, 'iteration %d, loglik = %f\n', num_iter, loglik); end + %fprintf(1, 'iteration %d, loglik/NT = %f\n', num_iter, loglik/Tsum); + num_iter = num_iter + 1; + + %%% M step + + % Tsum = N*T + % Tsum1 = N*(T-1); + Tsum1 = Tsum - N; + A = beta * inv(gamma1); + %A = (gamma1' \ beta')'; + Q = (gamma2 - A*beta') / Tsum1; + if diagQ + Q = diag(diag(Q)); + end + if ~ARmode + C = delta * inv(gamma); + %C = (gamma' \ delta')'; + R = (alpha - C*delta') / Tsum; + if diagR + R = diag(diag(R)); + end + end + initx = x1sum / N; + initV = P1sum/N - initx*initx'; + + if ~isempty(constr_fun) + [A,C,Q,R,initx,initV] = feval(constr_fun, A, C, Q, R, initx, initV, varargin{:}); + end + + converged = em_converged(loglik, previous_loglik, thresh); + previous_loglik = loglik; +end + + + +%%%%%%%%% + +function [beta, gamma, delta, gamma1, gamma2, x1, V1, loglik] = ... + Estep(y, A, C, Q, R, initx, initV, ARmode) +% +% Compute the (expected) sufficient statistics for a single Kalman filter sequence. 
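% In detail, the statistics returned below are (all expectations taken under
% the smoothed posterior from kalman_smoother, i.e. conditioned on y(:,1:T);
% in ARmode the hidden states are the observations themselves):
%   beta   = sum_{t=2..T} E[x_t x_{t-1}']      delta = sum_{t=1..T} y_t E[x_t]'
%   gamma  = sum_{t=1..T} E[x_t x_t']          x1 = E[x_1],  V1 = Cov[x_1]
%   gamma1 = gamma - E[x_T x_T']               gamma2 = gamma - E[x_1 x_1']
% These feed the M step above, e.g. A = beta*inv(gamma1) and
% Q = (gamma2 - A*beta')/Tsum1.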
+% + +[os T] = size(y); +ss = length(A); + +if ARmode + xsmooth = y; + Vsmooth = zeros(ss, ss, T); % no uncertainty about the hidden states + VVsmooth = zeros(ss, ss, T); + loglik = 0; +else + [xsmooth, Vsmooth, VVsmooth, loglik] = kalman_smoother(y, A, C, Q, R, initx, initV); +end + +delta = zeros(os, ss); +gamma = zeros(ss, ss); +beta = zeros(ss, ss); +for t=1:T + delta = delta + y(:,t)*xsmooth(:,t)'; + gamma = gamma + xsmooth(:,t)*xsmooth(:,t)' + Vsmooth(:,:,t); + if t>1 beta = beta + xsmooth(:,t)*xsmooth(:,t-1)' + VVsmooth(:,:,t); end +end +gamma1 = gamma - xsmooth(:,T)*xsmooth(:,T)' - Vsmooth(:,:,T); +gamma2 = gamma - xsmooth(:,1)*xsmooth(:,1)' - Vsmooth(:,:,1); + +x1 = xsmooth(:,1); +V1 = Vsmooth(:,:,1); + + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/Kalman/learning_demo.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/Kalman/learning_demo.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,36 @@ +% Make a point move in the 2D plane +% State = (x y xdot ydot). We only observe (x y). +% Generate data from this process, and try to learn the dynamics back. + +% X(t+1) = F X(t) + noise(Q) +% Y(t) = H X(t) + noise(R) + +ss = 4; % state size +os = 2; % observation size +F = [1 0 1 0; 0 1 0 1; 0 0 1 0; 0 0 0 1]; +H = [1 0 0 0; 0 1 0 0]; +Q = 0.1*eye(ss); +R = 1*eye(os); +initx = [10 10 1 0]'; +initV = 10*eye(ss); + +seed = 1; +rand('state', seed); +randn('state', seed); +T = 100; +[x,y] = sample_lds(F, H, Q, R, initx, T); + +% Initializing the params to sensible values is crucial. +% Here, we use the true values for everything except F and H, +% which we initialize randomly (bad idea!) +% Lack of identifiability means the learned params. are often far from the true ones. +% All that EM guarantees is that the likelihood will increase. +F1 = randn(ss,ss); +H1 = randn(os,ss); +Q1 = Q; +R1 = R; +initx1 = initx; +initV1 = initV; +max_iter = 10; +[F2, H2, Q2, R2, initx2, initV2, LL] = learn_kalman(y, F1, H1, Q1, R1, initx1, initV1, max_iter); + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/Kalman/sample_lds.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/Kalman/sample_lds.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,65 @@ +function [x,y] = sample_lds(F, H, Q, R, init_state, T, models, G, u) +% SAMPLE_LDS Simulate a run of a (switching) stochastic linear dynamical system. +% [x,y] = switching_lds_draw(F, H, Q, R, init_state, models, G, u) +% +% x(t+1) = F*x(t) + G*u(t) + w(t), w ~ N(0, Q), x(0) = init_state +% y(t) = H*x(t) + v(t), v ~ N(0, R) +% +% Input: +% F(:,:,i) - the transition matrix for the i'th model +% H(:,:,i) - the observation matrix for the i'th model +% Q(:,:,i) - the transition covariance for the i'th model +% R(:,:,i) - the observation covariance for the i'th model +% init_state(:,i) - the initial mean for the i'th model +% T - the num. time steps to run for +% +% Optional inputs: +% models(t) - which model to use at time t. Default = ones(1,T) +% G(:,:,i) - the input matrix for the i'th model. Default = 0. +% u(:,t) - the input vector at time t. Default = zeros(1,T) +% +% Output: +% x(:,t) - the hidden state vector at time t. +% y(:,t) - the observation vector at time t. 
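% A small usage sketch of the switching form, with illustrative two-regime
% parameters (the helper sample_gaussian is assumed to be on the path):
ss2 = 2; os2 = 2; T2 = 30;
F2 = cat(3, eye(ss2), 0.5*eye(ss2));       % F(:,:,i) is the dynamics of regime i
H2 = cat(3, eye(os2), eye(os2));
Q2 = cat(3, 0.01*eye(ss2), 0.1*eye(ss2));
R2 = cat(3, 0.1*eye(os2), 0.1*eye(os2));
init2 = [zeros(ss2,1) ones(ss2,1)];        % init_state(:,i), one column per regime
regimes = [ones(1,15) 2*ones(1,15)];       % which model to use at each time step
[xs, ys] = sample_lds(F2, H2, Q2, R2, init2, T2, regimes);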
+ + +if ~iscell(F) + F = num2cell(F, [1 2]); + H = num2cell(H, [1 2]); + Q = num2cell(Q, [1 2]); + R = num2cell(R, [1 2]); +end + +M = length(F); +%T = length(models); + +if nargin < 7, + models = ones(1,T); +end +if nargin < 8, + G = num2cell(repmat(0, [1 1 M])); + u = zeros(1,T); +end + +[os ss] = size(H{1}); +state_noise_samples = cell(1,M); +obs_noise_samples = cell(1,M); +for i=1:M + state_noise_samples{i} = sample_gaussian(zeros(length(Q{i}),1), Q{i}, T)'; + obs_noise_samples{i} = sample_gaussian(zeros(length(R{i}),1), R{i}, T)'; +end + +x = zeros(ss, T); +y = zeros(os, T); + +m = models(1); +x(:,1) = init_state(:,m); +y(:,1) = H{m}*x(:,1) + obs_noise_samples{m}(:,1); + +for t=2:T + m = models(t); + x(:,t) = F{m}*x(:,t-1) + G{m}*u(:,t-1) + state_noise_samples{m}(:,t); + y(:,t) = H{m}*x(:,t) + obs_noise_samples{m}(:,t); +end + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/Kalman/smooth_update.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/Kalman/smooth_update.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,36 @@ +function [xsmooth, Vsmooth, VVsmooth_future] = smooth_update(xsmooth_future, Vsmooth_future, ... + xfilt, Vfilt, Vfilt_future, VVfilt_future, A, Q, B, u) +% One step of the backwards RTS smoothing equations. +% function [xsmooth, Vsmooth, VVsmooth_future] = smooth_update(xsmooth_future, Vsmooth_future, ... +% xfilt, Vfilt, Vfilt_future, VVfilt_future, A, B, u) +% +% INPUTS: +% xsmooth_future = E[X_t+1|T] +% Vsmooth_future = Cov[X_t+1|T] +% xfilt = E[X_t|t] +% Vfilt = Cov[X_t|t] +% Vfilt_future = Cov[X_t+1|t+1] +% VVfilt_future = Cov[X_t+1,X_t|t+1] +% A = system matrix for time t+1 +% Q = system covariance for time t+1 +% B = input matrix for time t+1 (or [] if none) +% u = input vector for time t+1 (or [] if none) +% +% OUTPUTS: +% xsmooth = E[X_t|T] +% Vsmooth = Cov[X_t|T] +% VVsmooth_future = Cov[X_t+1,X_t|T] + +%xpred = E[X(t+1) | t] +if isempty(B) + xpred = A*xfilt; +else + xpred = A*xfilt + B*u; +end +Vpred = A*Vfilt*A' + Q; % Vpred = Cov[X(t+1) | t] +J = Vfilt * A' * inv(Vpred); % smoother gain matrix +xsmooth = xfilt + J*(xsmooth_future - xpred); +Vsmooth = Vfilt + J*(Vsmooth_future - Vpred)*J'; +VVsmooth_future = VVfilt_future + (Vsmooth_future - Vfilt_future)*inv(Vfilt_future)*VVfilt_future; + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/Kalman/testKalman.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/Kalman/testKalman.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,2 @@ +tracking_demo +learning_demo diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/Kalman/tracking_demo.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/Kalman/tracking_demo.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,74 @@ +% Make a point move in the 2D plane +% State = (x y xdot ydot). We only observe (x y). + +% This code was used to generate Figure 15.9 of "Artificial Intelligence: a Modern Approach", +% Russell and Norvig, 2nd edition, Prentice Hall, 2003. 
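% In this model the state holds position and velocity; with unit time steps the
% transition matrix F below implements x <- x + xdot and y <- y + ydot
% (constant velocity plus process noise), and H picks out the observed position:
%   [x; y; xdot; ydot](t+1) = F * [x; y; xdot; ydot](t) + w(t),   w ~ N(0, Q)
%   [x_obs; y_obs](t)       = H * [x; y; xdot; ydot](t) + v(t),   v ~ N(0, R)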
+ +% X(t+1) = F X(t) + noise(Q) +% Y(t) = H X(t) + noise(R) + +ss = 4; % state size +os = 2; % observation size +F = [1 0 1 0; 0 1 0 1; 0 0 1 0; 0 0 0 1]; +H = [1 0 0 0; 0 1 0 0]; +Q = 0.1*eye(ss); +R = 1*eye(os); +initx = [10 10 1 0]'; +initV = 10*eye(ss); + +seed = 9; +rand('state', seed); +randn('state', seed); +T = 15; +[x,y] = sample_lds(F, H, Q, R, initx, T); + +[xfilt, Vfilt, VVfilt, loglik] = kalman_filter(y, F, H, Q, R, initx, initV); +[xsmooth, Vsmooth] = kalman_smoother(y, F, H, Q, R, initx, initV); + +dfilt = x([1 2],:) - xfilt([1 2],:); +mse_filt = sqrt(sum(sum(dfilt.^2))) + +dsmooth = x([1 2],:) - xsmooth([1 2],:); +mse_smooth = sqrt(sum(sum(dsmooth.^2))) + + +figure(1) +clf +%subplot(2,1,1) +hold on +plot(x(1,:), x(2,:), 'ks-'); +plot(y(1,:), y(2,:), 'g*'); +plot(xfilt(1,:), xfilt(2,:), 'rx:'); +for t=1:T, plotgauss2d(xfilt(1:2,t), Vfilt(1:2, 1:2, t)); end +hold off +legend('true', 'observed', 'filtered', 3) +xlabel('x') +ylabel('y') + + + +% 3x3 inches +set(gcf,'units','inches'); +set(gcf,'PaperPosition',[0 0 3 3]) +%print(gcf,'-depsc','/home/eecs/murphyk/public_html/Bayes/Figures/aima_filtered.eps'); +%print(gcf,'-djpeg','-r100', '/home/eecs/murphyk/public_html/Bayes/Figures/aima_filtered.jpg'); + + +figure(2) +%subplot(2,1,2) +hold on +plot(x(1,:), x(2,:), 'ks-'); +plot(y(1,:), y(2,:), 'g*'); +plot(xsmooth(1,:), xsmooth(2,:), 'rx:'); +for t=1:T, plotgauss2d(xsmooth(1:2,t), Vsmooth(1:2, 1:2, t)); end +hold off +legend('true', 'observed', 'smoothed', 3) +xlabel('x') +ylabel('y') + + +% 3x3 inches +set(gcf,'units','inches'); +set(gcf,'PaperPosition',[0 0 3 3]) +%print(gcf,'-djpeg','-r100', '/home/eecs/murphyk/public_html/Bayes/Figures/aima_smoothed.jpg'); +%print(gcf,'-depsc','/home/eecs/murphyk/public_html/Bayes/Figures/aima_smoothed.eps'); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/@assocarray/CVS/Entries --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/@assocarray/CVS/Entries Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,3 @@ +/assocarray.m/1.1.1.1/Wed May 29 15:59:52 2002// +/subsref.m/1.1.1.1/Wed Aug 4 19:36:30 2004// +D diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/@assocarray/CVS/Repository --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/@assocarray/CVS/Repository Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +FullBNT/BNT/@assocarray diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/@assocarray/CVS/Root --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/@assocarray/CVS/Root Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/@assocarray/assocarray.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/@assocarray/assocarray.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,10 @@ +function A = assocarray(keys, vals) +% ASSOCARRAY Make an associative array +% function A = assocarray(keys, vals) +% +% keys{i} is the i'th string, vals{i} is the i'th value. +% After construction, A('foo') will return the value associated with foo. 
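% A minimal usage sketch (the keys and values below are illustrative):
A = assocarray({'alpha', 'beta'}, {1, [2 3]});
v1 = A('alpha');   % returns 1, via the subsref method defined next
v2 = A('beta');    % returns [2 3]; lookup is a sequential scan over the keys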
+ +A.keys = keys; +A.vals = vals; +A = class(A, 'assocarray'); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/@assocarray/subsref.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/@assocarray/subsref.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,15 @@ +function val = subsref(A, S) +% SUBSREF Subscript reference for an associative array +% A('foo') will return the value associated with foo. +% If there are multiple identicaly keys, the first match is returned. +% Currently the search is sequential. + +i = 1; +while i <= length(A.keys) + if strcmp(S.subs{1}, A.keys{i}) + val = A.vals{i}; + return; + end + i = i + 1; +end +error(['can''t find ' S.subs{1}]) diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@boolean_CPD/CVS/Entries --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@boolean_CPD/CVS/Entries Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,2 @@ +/boolean_CPD.m/1.1.1.1/Wed May 29 15:59:52 2002// +D diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@boolean_CPD/CVS/Repository --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@boolean_CPD/CVS/Repository Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +FullBNT/BNT/CPDs/@boolean_CPD diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@boolean_CPD/CVS/Root --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@boolean_CPD/CVS/Root Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@boolean_CPD/boolean_CPD.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@boolean_CPD/boolean_CPD.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,179 @@ +function CPD = boolean_CPD(bnet, self, ftype, fname, pfail) +% BOOLEAN_CPD Make a tabular CPD representing a (noisy) boolean function +% +% CPD = boolean_cpd(bnet, self, 'inline', f) uses the inline function f +% to specify the CPT. +% e.g., suppose X4 = X2 AND (NOT X3). Then we can write +% bnet.CPD{4} = boolean_CPD(bnet, 4, 'inline', inline('(x(1) & ~x(2)')); +% Note that x(1) refers pvals(1) = X2, and x(2) refers to pvals(2)=X3. +% +% CPD = boolean_cpd(bnet, self, 'named', f) assumes f is a function name. +% f can be built-in to matlab, or a file. +% e.g., If X4 = X2 AND X3, we can write +% bnet.CPD{4} = boolean_CPD(bnet, 4, 'named', 'and'); +% e.g., If X4 = X2 OR X3, we can write +% bnet.CPD{4} = boolean_CPD(bnet, 4, 'named', 'any'); +% +% CPD = boolean_cpd(bnet, self, 'rnd') makes a random non-redundant bool fn. +% +% CPD = boolean_CPD(bnet, self, 'inline'/'named', f, pfail) +% will put probability mass 1-pfail on f(parents), and put pfail on the other value. +% This is useful for simulating noisy boolean functions. +% If pfail is omitted, it is set to 0. +% (Note that adding noise to a random (non-redundant) boolean function just creates a different +% (potentially redundant) random boolean function.) +% +% Note: This cannot be used to simulate a noisy-OR gate. +% Example: suppose C has parents A and B, and the +% link of A->C fails with prob pA and the link B->C fails with pB. 
+% Then the noisy-OR gate defines the following distribution +% +% A B P(C=0) +% 0 0 1.0 +% 1 0 pA +% 0 1 pB +% 1 1 pA * PB +% +% By contrast, boolean_CPD(bnet, C, 'any', p) would define +% +% A B P(C=0) +% 0 0 1-p +% 1 0 p +% 0 1 p +% 1 1 p + + +if nargin==0 + % This occurs if we are trying to load an object from a file. + CPD = tabular_CPD(bnet, self); + return; +elseif isa(bnet, 'boolean_CPD') + % This might occur if we are copying an object. + CPD = bnet; + return; +end + +if nargin < 5, pfail = 0; end + +ps = parents(bnet.dag, self); +ns = bnet.node_sizes; +psizes = ns(ps); +self_size = ns(self); + +psucc = 1-pfail; + +k = length(ps); +switch ftype + case 'inline', f = eval_bool_fn(fname, k); + case 'named', f = eval_bool_fn(fname, k); + case 'rnd', f = mk_rnd_bool_fn(k); + otherwise, error(['unknown function type ' ftype]); +end + +CPT = zeros(prod(psizes), self_size); +ndx = find(f==0); +CPT(ndx, 1) = psucc; +CPT(ndx, 2) = pfail; +ndx = find(f==1); +CPT(ndx, 2) = psucc; +CPT(ndx, 1) = pfail; +if k > 0 + CPT = reshape(CPT, [psizes self_size]); +end + +clamp = 1; +CPD = tabular_CPD(bnet, self, CPT, [], clamp); + + + +%%%%%%%%%%%% + +function f = eval_bool_fn(fname, n) +% EVAL_BOOL_FN Evaluate a boolean function on all bit vectors of length n +% f = eval_bool_fn(fname, n) +% +% e.g. f = eval_bool_fn(inline('x(1) & x(3)'), 3) +% returns 0 0 0 0 0 1 0 1 + +ns = 2*ones(1, n); +f = zeros(1, 2^n); +bits = ind2subv(ns, 1:2^n); +for i=1:2^n + f(i) = feval(fname, bits(i,:)-1); +end + +%%%%%%%%%%%%%%% + +function f = mk_rnd_bool_fn(n) +% MK_RND_BOOL_FN Make a random bit vector of length n that encodes a non-redundant boolean function +% f = mk_rnd_bool_fn(n) + +red = 1; +while red + f = sample_discrete([0.5 0.5], 2^n, 1)-1; + red = redundant_bool_fn(f); +end + +%%%%%%%% + + +function red = redundant_bool_fn(f) +% REDUNDANT_BOOL_FN Does a boolean function depend on all its input values? +% r = redundant_bool_fn(f) +% +% f is a vector of length 2^n, representing the output for each bit vector. +% An input is redundant if there is no assignment to the other bits +% which changes the output e.g., input 1 is redundant if u(2:n) s.t., +% f([0 u(2:n)]) <> f([1 u(2:n)]). +% A function is redundant it it has any redundant inputs. + +n = log2(length(f)); +ns = 2*ones(1,n); +red = 0; +for i=1:n + ens = ns; + ens(i) = 1; + U = ind2subv(ens, 1:2^(n-1)); + U(:,i) = 1; + f1 = f(subv2ind(ns, U)); + U(:,i) = 2; + f2 = f(subv2ind(ns, U)); + if isequal(f1, f2) + red = 1; + return; + end +end + + +%%%%%%%%%% + +function [b, iter] = rnd_truth_table(N) +% RND_TRUTH_TABLE Construct the output of a random truth table s.t. each input is non-redundant +% b = rnd_truth_table(N) +% +% N is the number of inputs. +% b is a random bit string of length N, representing the output of the truth table. +% Non-redundant means that, for each input position k, +% there are at least two bit patterns, u and v, that differ only in the k'th position, +% s.t., f(u) ~= f(v), where f is the function represented by b. +% We use rejection sampling to ensure non-redundancy. 
+% +% Example: b = [0 0 0 1 0 0 0 1] is indep of 3rd input (AND of inputs 1 and 2) + +bits = ind2subv(2*ones(1,N), 1:2^N)-1; +redundant = 1; +iter = 0; +while redundant & (iter < 4) + iter = iter + 1; + b = sample_discrete([0.5 0.5], 1, 2^N)-1; + redundant = 0; + for i=1:N + on = find(bits(:,i)==1); + off = find(bits(:,i)==0); + if isequal(b(on), b(off)) + redundant = 1; + break; + end + end +end + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@deterministic_CPD/CVS/Entries --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@deterministic_CPD/CVS/Entries Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,2 @@ +/deterministic_CPD.m/1.1.1.1/Mon Oct 7 13:26:36 2002// +D diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@deterministic_CPD/CVS/Repository --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@deterministic_CPD/CVS/Repository Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +FullBNT/BNT/CPDs/@deterministic_CPD diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@deterministic_CPD/CVS/Root --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@deterministic_CPD/CVS/Root Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@deterministic_CPD/deterministic_CPD.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@deterministic_CPD/deterministic_CPD.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,59 @@ +function CPD = deterministic_CPD(bnet, self, fname, pfail) +% DETERMINISTIC_CPD Make a tabular CPD representing a (noisy) deterministic function +% +% CPD = deterministic_CPD(bnet, self, fname) +% This calls feval(fname, pvals) for each possible vector of parent values. +% e.g., suppose there are 2 ternary parents, then pvals = +% [1 1], [2 1], [3 1], [1 2], [2 2], [3 2], [1 3], [2 3], [3 3] +% If v = feval(fname, pvals(i)), then +% CPD(x | parents=pvals(i)) = 1 if x==v, and = 0 if x<>v +% e.g., suppose X4 = X2 AND (NOT X3). Then +% bnet.CPD{4} = deterministic_CPD(bnet, 4, inline('((x(1)-1) & ~(x(2)-1)) + 1')); +% Note that x(1) refers pvals(1) = X2, and x(2) refers to pvals(2)=X3 +% See also boolean_CPD. +% +% CPD = deterministic_CPD(bnet, self, fname, pfail) +% will put probability mass 1-pfail on f(parents), and distribute pfail over the other values. +% This is useful for simulating noisy deterministic functions. +% If pfail is omitted, it is set to 0. +% + + +if nargin==0 + % This occurs if we are trying to load an object from a file. + CPD = tabular_CPD(bnet, self); + return; +elseif isa(bnet, 'deterministic_CPD') + % This might occur if we are copying an object. 
+ CPD = bnet; + return; +end + +if nargin < 4, pfail = 0; end + +ps = parents(bnet.dag, self); +ns = bnet.node_sizes; +psizes = ns(ps); +self_size = ns(self); + +psucc = 1-pfail; + +CPT = zeros(prod(psizes), self_size); +pvals = zeros(1, length(ps)); +for i=1:prod(psizes) + pvals = ind2subv(psizes, i); + x = feval(fname, pvals); + %fprintf('%d ', [pvals x]); fprintf('\n'); + if psucc == 1 + CPT(i, x) = 1; + else + CPT(i, x) = psucc; + rest = mysetdiff(1:self_size, x); + CPT(i, rest) = pfail/length(rest); + end +end +CPT = reshape(CPT, [psizes self_size]); + +CPD = tabular_CPD(bnet, self, 'CPT',CPT, 'clamped',1); + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CPD_to_lambda_msg.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CPD_to_lambda_msg.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,16 @@ +function lam_msg = CPD_to_lambda_msg(CPD, msg_type, n, ps, msg, p, evidence) +% CPD_TO_LAMBDA_MSG Compute lambda message (discrete) +% lam_msg = compute_lambda_msg(CPD, msg_type, n, ps, msg, p, evidence) +% Pearl p183 eq 4.52 + +switch msg_type + case 'd', + T = prod_CPT_and_pi_msgs(CPD, n, ps, msg, p); + mysize = length(msg{n}.lambda); + lambda = dpot(n, mysize, msg{n}.lambda); + T = multiply_by_pot(T, lambda); + lam_msg = pot_to_marginal(marginalize_pot(T, p)); + lam_msg = lam_msg.T; + case 'g', + error('discrete_CPD can''t create Gaussian msgs') +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CPD_to_pi.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CPD_to_pi.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,13 @@ +function pi = CPD_to_pi(CPD, msg_type, n, ps, msg, evidence) +% COMPUTE_PI Compute pi vector (discrete) +% pi = compute_pi(CPD, msg_type, n, ps, msg, evidence) +% Pearl p183 eq 4.51 + +switch msg_type + case 'd', + T = prod_CPT_and_pi_msgs(CPD, n, ps, msg); + pi = pot_to_marginal(marginalize_pot(T, n)); + pi = pi.T(:); + case 'g', + error('can only convert discrete CPD to Gaussian pi if observed') +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CPD_to_scgpot.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CPD_to_scgpot.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,25 @@ +function pot = CPD_to_scgpot(CPD, domain, ns, cnodes, evidence) +% CPD_TO_SCGPOT Convert a CPD to a CG potential, incorporating any evidence (discrete) +% pot = CPD_to_scgpot(CPD, domain, ns, cnodes, evidence) +% +% domain is the domain of CPD. +% node_sizes(i) is the size of node i. +% cnodes +% evidence{i} is the evidence on the i'th node. 
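% To complement the deterministic_CPD constructor above, a minimal sketch,
% assuming bnet already exists, node 3 has two ternary parents (nodes 1 and 2),
% and node 3 itself has three states:
bnet.CPD{3} = deterministic_CPD(bnet, 3, inline('max(x(1), x(2))'));
% Each parent configuration pvals is passed to the function; the returned value
% (here max(pvals), always in 1..3) gets probability 1 in the corresponding CPT row.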
+ +%odom = domain(~isemptycell(evidence(domain))); + +%vals = cat(1, evidence{odom}); +%map = find_equiv_posns(odom, domain); +%index = mk_multi_index(length(domain), map, vals); +CPT = CPD_to_CPT(CPD); +%CPT = CPT(index{:}); +CPT = CPT(:); +%ns(odom) = 1; +potarray = cell(1, length(CPT)); +for i=1:length(CPT) + %p = CPT(i); + potarray{i} = scgcpot(0, 0, CPT(i)); + %scpot{i} = scpot(0, 0); +end +pot = scgpot(domain, [], [], ns, potarray); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CVS/Entries --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CVS/Entries Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,15 @@ +/CPD_to_lambda_msg.m/1.1.1.1/Wed May 29 15:59:52 2002// +/CPD_to_pi.m/1.1.1.1/Wed May 29 15:59:52 2002// +/CPD_to_scgpot.m/1.1.1.1/Wed May 29 15:59:52 2002// +/README/1.1.1.1/Wed May 29 15:59:52 2002// +/convert_CPD_to_table_hidden_ps.m/1.1.1.1/Wed May 29 15:59:52 2002// +/convert_obs_CPD_to_table.m/1.1.1.1/Wed May 29 15:59:52 2002// +/convert_to_pot.m/1.1.1.1/Fri Feb 20 22:00:38 2004// +/convert_to_sparse_table.c/1.1.1.1/Wed May 29 15:59:52 2002// +/convert_to_table.m/1.1.1.1/Wed May 29 15:59:52 2002// +/discrete_CPD.m/1.1.1.1/Wed May 29 15:59:52 2002// +/dom_sizes.m/1.1.1.1/Wed May 29 15:59:52 2002// +/log_prob_node.m/1.1.1.1/Wed May 29 15:59:52 2002// +/prob_node.m/1.1.1.1/Wed May 29 15:59:52 2002// +/sample_node.m/1.1.1.1/Wed May 29 15:59:52 2002// +D diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CVS/Entries.Log --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CVS/Entries.Log Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,2 @@ +A D/Old//// +A D/private//// diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CVS/Repository --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CVS/Repository Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +FullBNT/BNT/CPDs/@discrete_CPD diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CVS/Root --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/CVS/Root Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/CVS/Entries --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/CVS/Entries Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,5 @@ +/convert_to_pot.m/1.1.1.1/Wed May 29 15:59:52 2002// +/convert_to_table.m/1.1.1.1/Wed May 29 15:59:52 2002// +/prob_CPD.m/1.1.1.1/Wed May 29 15:59:52 2002// +/prob_node.m/1.1.1.1/Wed May 29 15:59:52 2002// +D diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/CVS/Repository --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/CVS/Repository Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +FullBNT/BNT/CPDs/@discrete_CPD/Old diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/CVS/Root --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/CVS/Root Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/convert_to_pot.m --- /dev/null Thu Jan 
01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/convert_to_pot.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,44 @@ +function pot = convert_to_pot(CPD, pot_type, domain, evidence) +% CONVERT_TO_POT Convert a tabular CPD to one or more potentials +% pots = convert_to_pot(CPD, pot_type, domain, evidence) +% +% pots{i} = CPD evaluated using evidence(domain(:,i)) +% If 'domains' is a single row vector, pots will be an object, not a cell array. + +ncases = size(domain,2); +assert(ncases==1); % not yet vectorized + +sz = dom_sizes(CPD); +ns = zeros(1, max(domain)); +ns(domain) = sz; + +local_ev = evidence(domain); +obs_bitv = ~isemptycell(local_ev); +odom = domain(obs_bitv); +T = convert_to_table(CPD, domain, local_ev, obs_bitv); + +switch pot_type + case 'u', + pot = upot(domain, sz, T, 0*myones(sz)); + case 'd', + ns(odom) = 1; + pot = dpot(domain, ns(domain), T); + case {'c','g'}, + % Since we want the output to be a Gaussian, the whole family must be observed. + % In other words, the potential is really just a constant. + p = T; + %p = prob_node(CPD, evidence(domain(end)), evidence(domain(1:end-1))); + ns(domain) = 0; + pot = cpot(domain, ns(domain), log(p)); + case 'cg', + T = T(:); + ns(odom) = 1; + can = cell(1, length(T)); + for i=1:length(T) + can{i} = cpot([], [], log(T(i))); + end + pot = cgpot(domain, [], ns, can); + otherwise, + error(['unrecognized pot type ' pot_type]) +end + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/convert_to_table.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/convert_to_table.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,23 @@ +function T = convert_to_table(CPD, domain, local_ev, obs_bitv) +% CONVERT_TO_TABLE Convert a discrete CPD to a table +% function T = convert_to_table(CPD, domain, local_ev, obs_bitv) +% +% We convert the CPD to a CPT, and then lookup the evidence on the discrete parents. +% The resulting table can easily be converted to a potential. + + +CPT = CPD_to_CPT(CPD); +obs_child_only = ~any(obs_bitv(1:end-1)) & obs_bitv(end); + +if obs_child_only + sz = size(CPT); + CPT = reshape(CPT, prod(sz(1:end-1)), sz(end)); + o = local_ev{end}; + T = CPT(:, o); +else + odom = domain(obs_bitv); + vals = cat(1, local_ev{find(obs_bitv)}); % undo cell array + map = find_equiv_posns(odom, domain); + index = mk_multi_index(length(domain), map, vals); + T = CPT(index{:}); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/prob_CPD.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/prob_CPD.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,25 @@ +function p = prob_CPD(CPD, domain, ns, cnodes, evidence) +% PROB_CPD Compute prob of a node given evidence on the parents (discrete) +% p = prob_CPD(CPD, domain, ns, cnodes, evidence) +% +% domain is the domain of CPD. +% node_sizes(i) is the size of node i. +% cnodes = all the cts nodes +% evidence{i} is the evidence on the i'th node. 
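% The lookup below uses BNT's column-major indexing (the first parent varies
% fastest). For example, with two parents of sizes [2 3] and a binary child:
%   pvals = [2 3];                    % observed parent values
%   i = subv2ind([2 3], pvals);       % i = 6, the parent-configuration index
%   T = reshape(CPT, [6 2]);          % rows = parent configs, cols = child states
%   p = T(i, evidence{self});         % P(self = observed value | parent values)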
+ +ps = domain(1:end-1); +self = domain(end); +CPT = CPD_to_CPT(CPD); + +if isempty(ps) + T = CPT; +else + assert(~any(isemptycell(evidence(ps)))); + pvals = cat(1, evidence{ps}); + i = subv2ind(ns(ps), pvals(:)'); + T = reshape(CPT, [prod(ns(ps)) ns(self)]); + T = T(i,:); +end +p = T(evidence{self}); + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/prob_node.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/Old/prob_node.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,51 @@ +function [P, p] = prob_node(CPD, self_ev, pev) +% PROB_NODE Compute prod_m P(x(i,m)| x(pi_i,m), theta_i) for node i (discrete) +% [P, p] = prob_node(CPD, self_ev, pev) +% +% self_ev(m) is the evidence on this node in case m. +% pev(i,m) is the evidence on the i'th parent in case m (if there are any parents). +% (These may also be cell arrays.) +% +% p(m) = P(x(i,m)| x(pi_i,m), theta_i) +% P = prod p(m) + +if iscell(self_ev), usecell = 1; else usecell = 0; end + +ncases = length(self_ev); +sz = dom_sizes(CPD); + +nparents = length(sz)-1; +if nparents == 0 + assert(isempty(pev)); +else + assert(isequal(size(pev), [nparents ncases])); +end + +n = length(sz); +dom = 1:n; +p = zeros(1, ncases); +if nparents == 0 + for m=1:ncases + if usecell + evidence = {self_ev{m}}; + else + evidence = num2cell(self_ev(m)); + end + T = convert_to_table(CPD, dom, evidence); + p(m) = T; + end +else + for m=1:ncases + if usecell + evidence = cell(1,n); + evidence(1:n-1) = pev(:,m); + evidence(n) = self_ev(m); + else + evidence = num2cell([pev(:,m)', self_ev(m)]); + end + T = convert_to_table(CPD, dom, evidence); + p(m) = T; + end +end +P = prod(p); + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/README --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/README Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,5 @@ +Any CPD on a discrete child with discrete parents +can be represented as a table (although this might be quite big). +discrete_CPD uses this tabular representation to implement various +functions. Subtypes are free to implement more efficient versions. + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/convert_CPD_to_table_hidden_ps.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/convert_CPD_to_table_hidden_ps.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,20 @@ +function T = convert_CPD_to_table_hidden_ps(CPD, child_obs) +% CONVERT_CPD_TO_TABLE_HIDDEN_PS Convert a discrete CPD to a table +% T = convert_CPD_to_table_hidden_ps(CPD, child_obs) +% +% This is like convert_to_table, except that we are guaranteed that +% none of the parents have evidence on them. +% child_obs may be an integer (1,2,...) or []. 
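% For example, for a node with two binary parents and a binary child:
%   T = convert_CPD_to_table_hidden_ps(CPD, []);   % 8x1: the full CPT, stacked
%   T = convert_CPD_to_table_hidden_ps(CPD, 2);    % 4x1: P(child=2 | each parent config)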
+ +CPT = CPD_to_CPT(CPD); +if isempty(child_obs) + T = CPT(:); +else + sz = dom_sizes(CPD); + if length(sz)==1 % no parents + T = CPT(child_obs); + else + CPT = reshape(CPT, prod(sz(1:end-1)), sz(end)); + T = CPT(:, child_obs); + end +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/convert_obs_CPD_to_table.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/convert_obs_CPD_to_table.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,13 @@ +function T = convert_to_table(CPD, domain, evidence) +% CONVERT_TO_TABLE Convert a discrete CPD to a table +% T = convert_to_table(CPD, domain, evidence) +% +% We convert the CPD to a CPT, and then lookup the evidence on the discrete parents. +% The resulting table can easily be converted to a potential. + +CPT = CPD_to_CPT(CPD); +odom = domain(~isemptycell(evidence(domain))); +vals = cat(1, evidence{odom}); +map = find_equiv_posns(odom, domain); +index = mk_multi_index(length(domain), map, vals); +T = CPT(index{:}); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/convert_to_pot.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/convert_to_pot.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,62 @@ +function pot = convert_to_pot(CPD, pot_type, domain, evidence) +% CONVERT_TO_POT Convert a discrete CPD to a potential +% pot = convert_to_pot(CPD, pot_type, domain, evidence) +% +% pots = CPD evaluated using evidence(domain) + +ncases = size(domain,2); +assert(ncases==1); % not yet vectorized + +sz = dom_sizes(CPD); +ns = zeros(1, max(domain)); +ns(domain) = sz; + +CPT1 = CPD_to_CPT(CPD); +spar = issparse(CPT1); +odom = domain(~isemptycell(evidence(domain))); +if spar + T = convert_to_sparse_table(CPD, domain, evidence); +else + T = convert_to_table(CPD, domain, evidence); +end + +switch pot_type + case 'u', + pot = upot(domain, sz, T, 0*myones(sz)); + case 'd', + ns(odom) = 1; + pot = dpot(domain, ns(domain), T); + case {'c','g'}, + % Since we want the output to be a Gaussian, the whole family must be observed. + % In other words, the potential is really just a constant. 
+ p = T; + %p = prob_node(CPD, evidence(domain(end)), evidence(domain(1:end-1))); + ns(domain) = 0; + pot = cpot(domain, ns(domain), log(p)); + + case 'cg', + T = T(:); + ns(odom) = 1; + can = cell(1, length(T)); + for i=1:length(T) + if T(i) == 0 + can{i} = cpot([], [], -Inf); % bug fix by Bob Welch 20/2/04 + else + can{i} = cpot([], [], log(T(i))); + end; + end + pot = cgpot(domain, [], ns, can); + + case 'scg' + T = T(:); + ns(odom) = 1; + pot_array = cell(1, length(T)); + for i=1:length(T) + pot_array{i} = scgcpot([], [], T(i)); + end + pot = scgpot(domain, [], [], ns, pot_array); + + otherwise, + error(['unrecognized pot type ' pot_type]) +end + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/convert_to_sparse_table.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@discrete_CPD/convert_to_sparse_table.c Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,154 @@ +/* convert_to_sparse_table.c convert a sparse discrete CPD with evidence into sparse table */ +/* convert_to_pot.m located in ../CPDs/discrete_CPD call it */ +/* 3 input */ +/* CPD prhs[0] with 1D sparse CPT */ +/* domain prhs[1] */ +/* evidence prhs[2] */ +/* 1 output */ +/* T plhs[0] sparse table */ + +#include +#include "mex.h" + +void ind_subv(int index, const int *cumprod, const int n, int *bsubv){ + int i; + + for (i = n-1; i >= 0; i--) { + bsubv[i] = ((int)floor(index / cumprod[i])); + index = index % cumprod[i]; + } +} + +int subv_ind(const int n, const int *cumprod, const int *subv){ + int i, index=0; + + for(i=0; i 1e-3) | isinf(P) + if isinf(P) % Y is observed + Sigma_lambda = zeros(self_size, self_size); % infinite precision => 0 variance + mu_lambda = msg{n}.lambda.mu; % observed_value; + else + Sigma_lambda = inv(P); + mu_lambda = Sigma_lambda * msg{n}.lambda.info_state; + end + C = inv(Sigma_lambda + BSigma); + lam_msg.precision = Bi' * C * Bi; + lam_msg.info_state = Bi' * C * (mu_lambda - Bmu); + else + % method that uses matrix inversion lemma to avoid inverting P + A = inv(P + inv(BSigma)); + C = P - P*A*P; + lam_msg.precision = Bi' * C * Bi; + D = eye(self_size) - P*A; + z = msg{n}.lambda.info_state; + lam_msg.info_state = Bi' * (D*z - D*P*Bmu); + end +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/CPD_to_pi.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/CPD_to_pi.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,22 @@ +function pi = CPD_to_pi(CPD, msg_type, n, ps, msg, evidence) +% CPD_TO_PI Compute the pi vector (gaussian) +% function pi = CPD_to_pi(CPD, msg_type, n, ps, msg, evidence) + +switch msg_type + case 'd', + error('gaussian_CPD can''t create discrete msgs') + case 'g', + [m, Q, W] = gaussian_CPD_params_given_dps(CPD, [ps n], evidence); + cps = ps(CPD.cps); + cpsizes = CPD.sizes(CPD.cps); + pi.mu = m; + pi.Sigma = Q; + for k=1:length(cps) % only get pi msgs from cts parents + %bk = block(k, cpsizes); + bk = CPD.cps_block_ndx{k}; + Bk = W(:, bk); + m = msg{n}.pi_from_parent{k}; + pi.Sigma = pi.Sigma + Bk * m.Sigma * Bk'; + pi.mu = pi.mu + Bk * m.mu; % m.mu = u(k) + end +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/CPD_to_scgpot.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/CPD_to_scgpot.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,58 @@ +function pot = CPD_to_scgpot(CPD, domain, ns, cnodes, evidence) +% CPD_TO_CGPOT Convert a Gaussian CPD to a CG 
potential, incorporating any evidence +% pot = CPD_to_cgpot(CPD, domain, ns, cnodes, evidence) + +self = CPD.self; +dnodes = mysetdiff(1:length(ns), cnodes); +odom = domain(~isemptycell(evidence(domain))); +cdom = myintersect(cnodes, domain); +cheaddom = myintersect(self, domain); +ctaildom = mysetdiff(cdom,cheaddom); +ddom = myintersect(dnodes, domain); +cobs = myintersect(cdom, odom); +dobs = myintersect(ddom, odom); +ens = ns; % effective node size +ens(cobs) = 0; +ens(dobs) = 1; + +% Extract the params compatible with the observations (if any) on the discrete parents (if any) +% parents are all but the last domain element +ps = domain(1:end-1); +dps = myintersect(ps, ddom); +dops = myintersect(dps, odom); + +map = find_equiv_posns(dops, dps); +dpvals = cat(1, evidence{dops}); +index = mk_multi_index(length(dps), map, dpvals); + +dpsize = prod(ens(dps)); +cpsize = size(CPD.weights(:,:,1), 2); % cts parents size +ss = size(CPD.mean, 1); % self size +% the reshape acts like a squeeze +m = reshape(CPD.mean(:, index{:}), [ss dpsize]); +C = reshape(CPD.cov(:, :, index{:}), [ss ss dpsize]); +W = reshape(CPD.weights(:, :, index{:}), [ss cpsize dpsize]); + + +% Convert each conditional Gaussian to a canonical potential +pot = cell(1, dpsize); +for i=1:dpsize + %pot{i} = linear_gaussian_to_scgcpot(m(:,i), C(:,:,i), W(:,:,i), cdom, ns, cnodes, evidence); + pot{i} = scgcpot(ss, cpsize, 1, m(:,i), W(:,:,i), C(:,:,i)); +end + +pot = scgpot(ddom, cheaddom, ctaildom, ens, pot); + + +function pot = linear_gaussian_to_scgcpot(mu, Sigma, W, domain, ns, cnodes, evidence) +% LINEAR_GAUSSIAN_TO_CPOT Convert a linear Gaussian CPD to a stable conditional potential element. +% pot = linear_gaussian_to_cpot(mu, Sigma, W, domain, ns, cnodes, evidence) + +p = 1; +A = mu; +B = W; +C = Sigma; +ns(odom) = 0; +%pot = scgcpot(, ns(domain), p, A, B, C); + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/CVS/Entries --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/CVS/Entries Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,20 @@ +/CPD_to_lambda_msg.m/1.1.1.1/Wed May 29 15:59:52 2002// +/CPD_to_pi.m/1.1.1.1/Wed May 29 15:59:52 2002// +/CPD_to_scgpot.m/1.1.1.1/Wed May 29 15:59:52 2002// +/adjustable_CPD.m/1.1.1.1/Wed May 29 15:59:52 2002// +/convert_CPD_to_table_hidden_ps.m/1.1.1.1/Wed May 29 15:59:52 2002// +/convert_to_pot.m/1.1.1.1/Sun Mar 9 23:03:16 2003// +/convert_to_table.m/1.1.1.1/Sun May 11 23:31:54 2003// +/display.m/1.1.1.1/Wed May 29 15:59:52 2002// +/gaussian_CPD.m/1.1.1.1/Wed Jun 15 21:13:06 2005// +/gaussian_CPD_params_given_dps.m/1.1.1.1/Sun May 11 23:13:40 2003// +/get_field.m/1.1.1.1/Wed May 29 15:59:52 2002// +/learn_params.m/1.1.1.1/Thu Jun 10 01:28:10 2004// +/log_prob_node.m/1.1.1.1/Tue Sep 10 17:44:00 2002// +/maximize_params.m/1.1.1.1/Tue May 20 14:10:06 2003// +/maximize_params_debug.m/1.1.1.1/Fri Jan 31 00:13:10 2003// +/reset_ess.m/1.1.1.1/Wed May 29 15:59:52 2002// +/sample_node.m/1.1.1.1/Wed May 29 15:59:52 2002// +/set_fields.m/1.1.1.1/Wed May 29 15:59:52 2002// +/update_ess.m/1.1.1.1/Tue Jul 22 22:55:46 2003// +D diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/CVS/Entries.Log --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/CVS/Entries.Log Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,2 @@ +A D/Old//// +A D/private//// diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/CVS/Repository --- /dev/null Thu 
Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/CVS/Repository Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +FullBNT/BNT/CPDs/@gaussian_CPD diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/CVS/Root --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/CVS/Root Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/CPD_to_lambda_msg.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/CPD_to_lambda_msg.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,64 @@ +function lam_msg = CPD_to_lambda_msg(CPD, msg_type, n, ps, msg, p) +% CPD_TO_LAMBDA_MSG Compute lambda message (gaussian) +% lam_msg = compute_lambda_msg(CPD, msg_type, n, ps, msg, p) +% Pearl p183 eq 4.52 + +switch msg_type + case 'd', + error('gaussian_CPD can''t create discrete msgs') + case 'g', + self_size = CPD.sizes(end); + if all(msg{n}.lambda.precision == 0) % no info to send on + lam_msg.precision = zeros(self_size); + lam_msg.info_state = zeros(self_size, 1); + return; + end + cpsizes = CPD.sizes(CPD.cps); + dpval = 1; + Q = CPD.cov(:,:,dpval); + Sigmai = Q; + wmu = zeros(self_size, 1); + for k=1:length(ps) + pk = ps(k); + if pk ~= p + bk = block(k, cpsizes); + Bk = CPD.weights(:, bk, dpval); + m = msg{n}.pi_from_parent{k}; + Sigmai = Sigmai + Bk * m.Sigma * Bk'; + wmu = wmu + Bk * m.mu; % m.mu = u(k) + end + end + % Sigmai = Q + sum_{k \neq i} B_k Sigma_k B_k' + i = find_equiv_posns(p, ps); + bi = block(i, cpsizes); + Bi = CPD.weights(:,bi, dpval); + + if 0 + P = msg{n}.lambda.precision; + if isinf(P) % inv(P)=Sigma_lambda=0 + precision_temp = inv(Sigmai); + lam_msg.precision = Bi' * precision_temp * Bi; + lam_msg.info_state = precision_temp * (msg{n}.lambda.mu - wmu); + else + A = inv(P + inv(Sigmai)); + precision_temp = P + P*A*P; + lam_msg.precision = Bi' * precision_temp * Bi; + self_size = length(P); + C = eye(self_size) + P*A; + z = msg{n}.lambda.info_state; + lam_msg.info_state = C*z - C*P*wmu; + end + end + + if isinf(msg{n}.lambda.precision) + Sigma_lambda = zeros(self_size, self_size); % infinite precision => 0 variance + mu_lambda = msg{n}.lambda.mu; % observed_value; + else + Sigma_lambda = inv(msg{n}.lambda.precision); + mu_lambda = Sigma_lambda * msg{n}.lambda.info_state; + end + precision_temp = inv(Sigma_lambda + Sigmai); + lam_msg.precision = Bi' * precision_temp * Bi; + lam_msg.info_state = Bi' * precision_temp * (mu_lambda - wmu); +end + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/CVS/Entries --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/CVS/Entries Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,7 @@ +/CPD_to_lambda_msg.m/1.1.1.1/Wed May 29 15:59:52 2002// +/gaussian_CPD.m/1.1.1.1/Wed May 29 15:59:52 2002// +/log_prob_node.m/1.1.1.1/Wed May 29 15:59:52 2002// +/maximize_params.m/1.1.1.1/Thu Jan 30 22:38:16 2003// +/update_ess.m/1.1.1.1/Wed May 29 15:59:52 2002// +/update_tied_ess.m/1.1.1.1/Wed May 29 15:59:52 2002// +D diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/CVS/Repository --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/CVS/Repository Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +FullBNT/BNT/CPDs/@gaussian_CPD/Old diff -r 
000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/CVS/Root --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/CVS/Root Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/gaussian_CPD.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/gaussian_CPD.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,184 @@ +function CPD = gaussian_CPD(varargin) +% GAUSSIAN_CPD Make a conditional linear Gaussian distrib. +% +% To define this CPD precisely, call the continuous (cts) parents (if any) X, +% the discrete parents (if any) Q, and this node Y. Then the distribution on Y is: +% - no parents: Y ~ N(mu, Sigma) +% - cts parents : Y|X=x ~ N(mu + W x, Sigma) +% - discrete parents: Y|Q=i ~ N(mu(i), Sigma(i)) +% - cts and discrete parents: Y|X=x,Q=i ~ N(mu(i) + W(i) x, Sigma(i)) +% +% CPD = gaussian_CPD(bnet, node, ...) will create a CPD with random parameters, +% where node is the number of a node in this equivalence class. +% +% The list below gives optional arguments [default value in brackets]. +% (Let ns(i) be the size of node i, X = ns(X), Y = ns(Y) and Q = prod(ns(Q)).) +% +% mean - mu(:,i) is the mean given Q=i [ randn(Y,Q) ] +% cov - Sigma(:,:,i) is the covariance given Q=i [ repmat(eye(Y,Y), [1 1 Q]) ] +% weights - W(:,:,i) is the regression matrix given Q=i [ randn(Y,X,Q) ] +% cov_type - if 'diag', Sigma(:,:,i) is diagonal [ 'full' ] +% tied_cov - if 1, we constrain Sigma(:,:,i) to be the same for all i [0] +% clamp_mean - if 1, we do not adjust mu(:,i) during learning [0] +% clamp_cov - if 1, we do not adjust Sigma(:,:,i) during learning [0] +% clamp_weights - if 1, we do not adjust W(:,:,i) during learning [0] +% cov_prior_weight - weight given to I prior for estimating Sigma [0.01] +% +% e.g., CPD = gaussian_CPD(bnet, i, 'mean', [0; 0], 'clamp_mean', 'yes') +% +% For backwards compatibility with BNT2, you can also specify the parameters in the following order +% CPD = gaussian_CPD(bnet, self, mu, Sigma, W, cov_type, tied_cov, clamp_mean, clamp_cov, clamp_weight) +% +% Sometimes it is useful to create an "isolated" CPD, without needing to pass in a bnet. +% In this case, you must specify the discrete and cts parents (dps, cps) and the family sizes, followed +% by the optional arguments above: +% CPD = gaussian_CPD('self', i, 'dps', dps, 'cps', cps, 'sz', fam_size, ...) + + +if nargin==0 + % This occurs if we are trying to load an object from a file. + CPD = init_fields; + clamp = 0; + CPD = class(CPD, 'gaussian_CPD', generic_CPD(clamp)); + return; +elseif isa(varargin{1}, 'gaussian_CPD') + % This might occur if we are copying an object. 
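% To make the conditional linear Gaussian definition concrete: with one cts
% parent of size 2, one binary discrete parent and self size 2, a draw from
% Y | X=x, Q=i can be generated as follows (all values are illustrative):
mu = randn(2,2); Sigma = repmat(eye(2), [1 1 2]); W = randn(2,2,2);
x = [0.5; -1]; i = 2;
y = mu(:,i) + W(:,:,i)*x + chol(Sigma(:,:,i))'*randn(2,1);  % ~ N(mu(i) + W(i)*x, Sigma(i))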
+ CPD = varargin{1}; + return; +end +CPD = init_fields; + +CPD = class(CPD, 'gaussian_CPD', generic_CPD(0)); + + +% parse mandatory arguments +if ~isstr(varargin{1}) % pass in bnet + bnet = varargin{1}; + self = varargin{2}; + args = varargin(3:end); + ns = bnet.node_sizes; + ps = parents(bnet.dag, self); + dps = myintersect(ps, bnet.dnodes); + cps = myintersect(ps, bnet.cnodes); + fam_sz = ns([ps self]); +else + disp('parsing new style') + for i=1:2:length(varargin) + switch varargin{i}, + case 'self', self = varargin{i+1}; + case 'dps', dps = varargin{i+1}; + case 'cps', cps = varargin{i+1}; + case 'sz', fam_sz = varargin{i+1}; + end + end + ps = myunion(dps, cps); + args = varargin; +end + +CPD.self = self; +CPD.sizes = fam_sz; + +% Figure out which (if any) of the parents are discrete, and which cts, and how big they are +% dps = discrete parents, cps = cts parents +CPD.cps = find_equiv_posns(cps, ps); % cts parent index +CPD.dps = find_equiv_posns(dps, ps); +ss = fam_sz(end); +psz = fam_sz(1:end-1); +dpsz = prod(psz(CPD.dps)); +cpsz = sum(psz(CPD.cps)); + +% set default params +CPD.mean = randn(ss, dpsz); +CPD.cov = 100*repmat(eye(ss), [1 1 dpsz]); +CPD.weights = randn(ss, cpsz, dpsz); +CPD.cov_type = 'full'; +CPD.tied_cov = 0; +CPD.clamped_mean = 0; +CPD.clamped_cov = 0; +CPD.clamped_weights = 0; +CPD.cov_prior_weight = 0.01; + +nargs = length(args); +if nargs > 0 + if ~isstr(args{1}) + % gaussian_CPD(bnet, self, mu, Sigma, W, cov_type, tied_cov, clamp_mean, clamp_cov, clamp_weights) + if nargs >= 1 & ~isempty(args{1}), CPD.mean = args{1}; end + if nargs >= 2 & ~isempty(args{2}), CPD.cov = args{2}; end + if nargs >= 3 & ~isempty(args{3}), CPD.weights = args{3}; end + if nargs >= 4 & ~isempty(args{4}), CPD.cov_type = args{4}; end + if nargs >= 5 & ~isempty(args{5}) & strcmp(args{5}, 'tied'), CPD.tied_cov = 1; end + if nargs >= 6 & ~isempty(args{6}), CPD.clamped_mean = 1; end + if nargs >= 7 & ~isempty(args{7}), CPD.clamped_cov = 1; end + if nargs >= 8 & ~isempty(args{8}), CPD.clamped_weights = 1; end + else + CPD = set_fields(CPD, args{:}); + end +end + +% Make sure the matrices have 1 dimension per discrete parent. +% Bug fix due to Xuejing Sun 3/6/01 +CPD.mean = myreshape(CPD.mean, [ss ns(dps)]); +CPD.cov = myreshape(CPD.cov, [ss ss ns(dps)]); +CPD.weights = myreshape(CPD.weights, [ss cpsz ns(dps)]); + +CPD.init_cov = CPD.cov; % we reset to this if things go wrong during learning + +% expected sufficient statistics +CPD.Wsum = zeros(dpsz,1); +CPD.WYsum = zeros(ss, dpsz); +CPD.WXsum = zeros(cpsz, dpsz); +CPD.WYYsum = zeros(ss, ss, dpsz); +CPD.WXXsum = zeros(cpsz, cpsz, dpsz); +CPD.WXYsum = zeros(cpsz, ss, dpsz); + +% For BIC +CPD.nsamples = 0; +switch CPD.cov_type + case 'full', + ncov_params = ss*(ss-1)/2; % since symmetric (and positive definite) + case 'diag', + ncov_params = ss; + otherwise + error(['unrecognized cov_type ' cov_type]); +end +% params = weights + mean + cov +if CPD.tied_cov + CPD.nparams = ss*cpsz*dpsz + ss*dpsz + ncov_params; +else + CPD.nparams = ss*cpsz*dpsz + ss*dpsz + dpsz*ncov_params; +end + + + +clamped = CPD.clamped_mean & CPD.clamped_cov & CPD.clamped_weights; +CPD = set_clamped(CPD, clamped); + +%%%%%%%%%%% + +function CPD = init_fields() +% This ensures we define the fields in the same order +% no matter whether we load an object from a file, +% or create it from scratch. (Matlab requires this.) 
+ +CPD.self = []; +CPD.sizes = []; +CPD.cps = []; +CPD.dps = []; +CPD.mean = []; +CPD.cov = []; +CPD.weights = []; +CPD.clamped_mean = []; +CPD.clamped_cov = []; +CPD.clamped_weights = []; +CPD.init_cov = []; +CPD.cov_type = []; +CPD.tied_cov = []; +CPD.Wsum = []; +CPD.WYsum = []; +CPD.WXsum = []; +CPD.WYYsum = []; +CPD.WXXsum = []; +CPD.WXYsum = []; +CPD.nsamples = []; +CPD.nparams = []; +CPD.cov_prior_weight = []; diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/log_prob_node.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/log_prob_node.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,59 @@ +function L = log_prob_node(CPD, self_ev, pev) +% LOG_PROB_NODE Compute prod_m log P(x(i,m)| x(pi_i,m), theta_i) for node i (gaussian) +% L = log_prob_node(CPD, self_ev, pev) +% +% self_ev(m) is the evidence on this node in case m. +% pev(i,m) is the evidence on the i'th parent in case m (if there are any parents). +% (These may also be cell arrays.) + +if iscell(self_ev), usecell = 1; else usecell = 0; end + +use_log = 1; +ncases = length(self_ev); +nparents = length(CPD.sizes)-1; +assert(ncases == size(pev, 2)); + +if ncases == 0 + L = 0; + return; +end + +if length(CPD.dps)==0 % no discrete parents, so we can vectorize + i = 1; + if usecell + Y = cell2num(self_ev); + else + Y = self_ev; + end + if length(CPD.cps) == 0 + L = gaussian_prob(Y, CPD.mean(:,i), CPD.cov(:,:,i), use_log); + else + if usecell + X = cell2num(pev); + else + X = pev; + end + L = gaussian_prob(Y, CPD.mean(:,i) + CPD.weights(:,:,i)*X, CPD.cov(:,:,i), use_log); + end +else % each case uses a (potentially) different set of parameters + L = 0; + for m=1:ncases + if usecell + dpvals = cat(1, pev{CPD.dps, m}); + else + dpvals = pev(CPD.dps, m); + end + i = subv2ind(CPD.sizes(CPD.dps), dpvals(:)'); + y = self_ev{m}; + if length(CPD.cps) == 0 + L = L + gaussian_prob(y, CPD.mean(:,i), CPD.cov(:,:,i), use_log); + else + if usecell + x = cat(1, pev{CPD.cps, m}); + else + x = pev(CPD.cps, m); + end + L = L + gaussian_prob(y, CPD.mean(:,i) + CPD.weights(:,:,i)*x, CPD.cov(:,:,i), use_log); + end + end +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/maximize_params.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/maximize_params.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,147 @@ +function CPD = maximize_params(CPD, temp) +% MAXIMIZE_PARAMS Set the params of a CPD to their ML values (Gaussian) +% CPD = maximize_params(CPD, temperature) +% +% Temperature is currently only used for entropic prior on Sigma + +% For details, see "Fitting a Conditional Gaussian Distribution", Kevin Murphy, tech. report, +% 1998, available at www.cs.berkeley.edu/~murphyk/papers.html +% Refering to table 2, we use equations 1/2 to estimate the covariance matrix in the untied/tied case, +% and equation 9 to estimate the weight matrix and mean. +% We do not implement spherical Gaussians - the code is already pretty complicated! + +if ~adjustable_CPD(CPD), return; end + +%assert(approxeq(CPD.nsamples, sum(CPD.Wsum))); +assert(~any(isnan(CPD.WXXsum))) +assert(~any(isnan(CPD.WXYsum))) +assert(~any(isnan(CPD.WYYsum))) + +[self_size cpsize dpsize] = size(CPD.weights); + +% Append 1s to the parents, and derive the corresponding cross products. +% This is used when estimate the means and weights simultaneosuly, +% and when estimatting Sigma. 
+% Let x2 = [x 1]' +XY = zeros(cpsize+1, self_size, dpsize); % XY(:,:,i) = sum_l w(l,i) x2(l) y(l)' +XX = zeros(cpsize+1, cpsize+1, dpsize); % XX(:,:,i) = sum_l w(l,i) x2(l) x2(l)' +YY = zeros(self_size, self_size, dpsize); % YY(:,:,i) = sum_l w(l,i) y(l) y(l)' +for i=1:dpsize + XY(:,:,i) = [CPD.WXYsum(:,:,i) % X*Y + CPD.WYsum(:,i)']; % 1*Y + % [x * [x' 1] = [xx' x + % 1] x' 1] + XX(:,:,i) = [CPD.WXXsum(:,:,i) CPD.WXsum(:,i); + CPD.WXsum(:,i)' CPD.Wsum(i)]; + YY(:,:,i) = CPD.WYYsum(:,:,i); +end + +w = CPD.Wsum(:); +% Set any zeros to one before dividing +% This is valid because w(i)=0 => WYsum(:,i)=0, etc +w = w + (w==0); + +if CPD.clamped_mean + % Estimating B2 and then setting the last column (the mean) to the clamped mean is *not* equivalent + % to estimating B and then adding the clamped_mean to the last column. + if ~CPD.clamped_weights + B = zeros(self_size, cpsize, dpsize); + for i=1:dpsize + if det(CPD.WXXsum(:,:,i))==0 + B(:,:,i) = 0; + else + % Eqn 9 in table 2 of TR + %B(:,:,i) = CPD.WXYsum(:,:,i)' * inv(CPD.WXXsum(:,:,i)); + B(:,:,i) = (CPD.WXXsum(:,:,i) \ CPD.WXYsum(:,:,i))'; + end + end + %CPD.weights = reshape(B, [self_size cpsize dpsize]); + CPD.weights = B; + end +elseif CPD.clamped_weights % KPM 1/25/02 + if ~CPD.clamped_mean % ML estimate is just sample mean of the residuals + for i=1:dpsize + CPD.mean(:,i) = (CPD.WYsum(:,i) - CPD.weights(:,:,i) * CPD.WXsum(:,i)) / w(i); + end + end +else % nothing is clamped, so estimate mean and weights simultaneously + B2 = zeros(self_size, cpsize+1, dpsize); + for i=1:dpsize + if det(XX(:,:,i))==0 % fix by U. Sondhauss 6/27/99 + B2(:,:,i)=0; + else + % Eqn 9 in table 2 of TR + %B2(:,:,i) = XY(:,:,i)' * inv(XX(:,:,i)); + B2(:,:,i) = (XX(:,:,i) \ XY(:,:,i))'; + end + CPD.mean(:,i) = B2(:,cpsize+1,i); + CPD.weights(:,:,i) = B2(:,1:cpsize,i); + end +end + +% Let B2 = [W mu] +if cpsize>0 + B2(:,1:cpsize,:) = reshape(CPD.weights, [self_size cpsize dpsize]); +end +B2(:,cpsize+1,:) = reshape(CPD.mean, [self_size dpsize]); + +% To avoid singular covariance matrices, +% we use the regularization method suggested in "A Quasi-Bayesian approach to estimating +% parameters for mixtures of normal distributions", Hamilton 91. +% If the ML estimate is Sigma = M/N, the MAP estimate is (M+gamma*I) / (N+gamma), +% where gamma >=0 is a smoothing parameter (equivalent sample size of I prior) + +gamma = CPD.cov_prior_weight; + +if ~CPD.clamped_cov + if CPD.cov_prior_entropic % eqn 12 of Brand AI/Stat 99 + Z = 1-temp; + % When temp > 1, Z is negative, so we are dividing by a smaller + % number, ie. increasing the variance. 
+ else + Z = 0; + end + if CPD.tied_cov + S = zeros(self_size, self_size); + % Eqn 2 from table 2 in TR + for i=1:dpsize + S = S + (YY(:,:,i) - B2(:,:,i)*XY(:,:,i)); + end + %denom = max(1, CPD.nsamples + gamma + Z); + denom = CPD.nsamples + gamma + Z; + S = (S + gamma*eye(self_size)) / denom; + if strcmp(CPD.cov_type, 'diag') + S = diag(diag(S)); + end + CPD.cov = repmat(S, [1 1 dpsize]); + else + for i=1:dpsize + % Eqn 1 from table 2 in TR + S = YY(:,:,i) - B2(:,:,i)*XY(:,:,i); + %denom = max(1, w(i) + gamma + Z); % gives wrong answers on mhmm1 + denom = w(i) + gamma + Z; + S = (S + gamma*eye(self_size)) / denom; + CPD.cov(:,:,i) = S; + end + if strcmp(CPD.cov_type, 'diag') + for i=1:dpsize + CPD.cov(:,:,i) = diag(diag(CPD.cov(:,:,i))); + end + end + end +end + + +check_covars = 0; +min_covar = 1e-5; +if check_covars % prevent collapsing to a point + for i=1:dpsize + if min(svd(CPD.cov(:,:,i))) < min_covar + disp(['resetting singular covariance for node ' num2str(CPD.self)]); + CPD.cov(:,:,i) = CPD.init_cov(:,:,i); + end + end +end + + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/update_ess.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/update_ess.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,85 @@ +function CPD = update_ess(CPD, fmarginal, evidence, ns, cnodes, hidden_bitv) +% UPDATE_ESS Update the Expected Sufficient Statistics of a Gaussian node +% function CPD = update_ess(CPD, fmarginal, evidence, ns, cnodes, hidden_bitv) + +%if nargin < 6 +% hidden_bitv = zeros(1, max(fmarginal.domain)); +% hidden_bitv(find(isempty(evidence)))=1; +%end + +dom = fmarginal.domain; +self = dom(end); +ps = dom(1:end-1); +hidden_self = hidden_bitv(self); +cps = myintersect(ps, cnodes); +dps = mysetdiff(ps, cps); +hidden_cps = all(hidden_bitv(cps)); +hidden_dps = all(hidden_bitv(dps)); + +CPD.nsamples = CPD.nsamples + 1; +[ss cpsz dpsz] = size(CPD.weights); % ss = self size + +% Let X be the cts parent (if any), Y be the cts child (self). + +if ~hidden_self & (isempty(cps) | ~hidden_cps) & hidden_dps % all cts nodes are observed, all discrete nodes are hidden + % Since X and Y are observed, SYY = 0, SXX = 0, SXY = 0 + % Since discrete parents are hidden, we do not need to add evidence to w. + w = fmarginal.T(:); + CPD.Wsum = CPD.Wsum + w; + y = evidence{self}; + Cyy = y*y'; + if ~CPD.useC + W = repmat(w(:)',ss,1); % W(y,i) = w(i) + W2 = repmat(reshape(W, [ss 1 dpsz]), [1 ss 1]); % W2(x,y,i) = w(i) + CPD.WYsum = CPD.WYsum + W .* repmat(y(:), 1, dpsz); + CPD.WYYsum = CPD.WYYsum + W2 .* repmat(reshape(Cyy, [ss ss 1]), [1 1 dpsz]); + else + W = w(:)'; + W2 = reshape(W, [1 1 dpsz]); + CPD.WYsum = CPD.WYsum + rep_mult(W, y(:), size(CPD.WYsum)); + CPD.WYYsum = CPD.WYYsum + rep_mult(W2, Cyy, size(CPD.WYYsum)); + end + if cpsz > 0 % X exists + x = cat(1, evidence{cps}); x = x(:); + Cxx = x*x'; + Cxy = x*y'; + if ~CPD.useC + CPD.WXsum = CPD.WXsum + W .* repmat(x(:), 1, dpsz); + CPD.WXXsum = CPD.WXXsum + W2 .* repmat(reshape(Cxx, [cpsz cpsz 1]), [1 1 dpsz]); + CPD.WXYsum = CPD.WXYsum + W2 .* repmat(reshape(Cxy, [cpsz ss 1]), [1 1 dpsz]); + else + CPD.WXsum = CPD.WXsum + rep_mult(W, x(:), size(CPD.WXsum)); + CPD.WXXsum = CPD.WXXsum + rep_mult(W2, Cxx, size(CPD.WXXsum)); + CPD.WXYsum = CPD.WXYsum + rep_mult(W2, Cxy, size(CPD.WXYsum)); + end + end + return; +end + +% general (non-vectorized) case +fullm = add_evidence_to_gmarginal(fmarginal, evidence, ns, cnodes); % slow! 
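+% After adding the evidence, fullm.T(i) holds the posterior weight of the i'th
+% joint discrete-parent configuration, and fullm.mu(:,i), fullm.Sigma(:,:,i)
+% hold the corresponding conditional mean and covariance of the stacked cts
+% family [X; Y]. The loop below accumulates weighted first and second moments
+% from these, using E[Z Z'] = Cov[Z] + E[Z] E[Z]'.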
+ +if dpsz == 1 % no discrete parents + w = 1; +else + w = fullm.T(:); +end + +CPD.Wsum = CPD.Wsum + w; +xi = 1:cpsz; +yi = (cpsz+1):(cpsz+ss); +for i=1:dpsz + muY = fullm.mu(yi, i); + SYY = fullm.Sigma(yi, yi, i); + CPD.WYsum(:,i) = CPD.WYsum(:,i) + w(i)*muY; + CPD.WYYsum(:,:,i) = CPD.WYYsum(:,:,i) + w(i)*(SYY + muY*muY'); % E[X Y] = Cov[X,Y] + E[X] E[Y] + if cpsz > 0 + muX = fullm.mu(xi, i); + SXX = fullm.Sigma(xi, xi, i); + SXY = fullm.Sigma(xi, yi, i); + CPD.WXsum(:,i) = CPD.WXsum(:,i) + w(i)*muX; + CPD.WXXsum(:,:,i) = CPD.WXXsum(:,:,i) + w(i)*(SXX + muX*muX'); + CPD.WXYsum(:,:,i) = CPD.WXYsum(:,:,i) + w(i)*(SXY + muX*muY'); + end +end + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/update_tied_ess.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/Old/update_tied_ess.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,118 @@ +function CPD = update_tied_ess(CPD, domain, engine, evidence, ns, cnodes) + +if ~adjustable_CPD(CPD), return; end +nCPDs = size(domain, 2); +fmarginal = cell(1, nCPDs); +for l=1:nCPDs + fmarginal{l} = marginal_family(engine, nodes(l)); +end + +[ss cpsz dpsz] = size(CPD.weights); +if const_evidence_pattern(engine) + dom = domain(:,1); + dnodes = mysetdiff(1:length(ns), cnodes); + ddom = myintersect(dom, dnodes); + cdom = myintersect(dom, cnodes); + odom = dom(~isemptycell(evidence(dom))); + hdom = dom(isemptycell(evidence(dom))); + % If all hidden nodes are discrete and all cts nodes are observed + % (e.g., HMM with Gaussian output) + % we can add the observed evidence in parallel + if mysubset(ddom, hdom) & mysubset(cdom, odom) + [mu, Sigma, T] = add_cts_ev_to_marginals(fmarginal, evidence, ns, cnodes); + else + mu = zeros(ss, dpsz, nCPDs); + Sigma = zeros(ss, ss, dpsz, nCPDs); + T = zeros(dpsz, nCPDs); + for l=1:nCPDs + [mu(:,:,l), Sigma(:,:,:,l), T(:,l)] = add_ev_to_marginals(fmarginal{l}, evidence, ns, cnodes); + end + end +end +CPD.nsamples = CPD.nsamples + nCPDs; + + +if dpsz == 1 % no discrete parents + w = 1; +else + w = fullm.T(:); +end +CPD.Wsum = CPD.Wsum + w; +% Let X be the cts parent (if any), Y be the cts child (self). +xi = 1:cpsz; +yi = (cpsz+1):(cpsz+ss); +for i=1:dpsz + muY = fullm.mu(yi, i); + SYY = fullm.Sigma(yi, yi, i); + CPD.WYsum(:,i) = CPD.WYsum(:,i) + w(i)*muY; + CPD.WYYsum(:,:,i) = CPD.WYYsum(:,:,i) + w(i)*(SYY + muY*muY'); % E[X Y] = Cov[X,Y] + E[X] E[Y] + if cpsz > 0 + muX = fullm.mu(xi, i); + SXX = fullm.Sigma(xi, xi, i); + SXY = fullm.Sigma(xi, yi, i); + CPD.WXsum(:,i) = CPD.WXsum(:,i) + w(i)*muX; + CPD.WXYsum(:,:,i) = CPD.WXYsum(:,:,i) + w(i)*(SXY + muX*muY'); + CPD.WXXsum(:,:,i) = CPD.WXXsum(:,:,i) + w(i)*(SXX + muX*muX'); + end +end + + +%%%%%%%%%%%%% + +function fullm = add_evidence_to_marginal(fmarginal, evidence, ns, cnodes) + + +dom = fmarginal.domain; + +% Find out which values of the discrete parents (if any) are compatible with +% the discrete evidence (if any). 
+dnodes = mysetdiff(1:length(ns), cnodes); +ddom = myintersect(dom, dnodes); +cdom = myintersect(dom, cnodes); +odom = dom(~isemptycell(evidence(dom))); +hdom = dom(isemptycell(evidence(dom))); + +dobs = myintersect(ddom, odom); +dvals = cat(1, evidence{dobs}); +ens = ns; % effective node sizes +ens(dobs) = 1; +S = prod(ens(ddom)); +subs = ind2subv(ens(ddom), 1:S); +mask = find_equiv_posns(dobs, ddom); +subs(mask) = dvals; +supportedQs = subv2ind(ns(ddom), subs); + +if isempty(ddom) + Qarity = 1; +else + Qarity = prod(ns(ddom)); +end +fullm.T = zeros(Qarity, 1); +fullm.T(supportedQs) = fmarginal.T(:); + +% Now put the hidden cts parts into their right blocks, +% leaving the observed cts parts as 0. +cobs = myintersect(cdom, odom); +chid = myintersect(cdom, hdom); +cvals = cat(1, evidence{cobs}); +n = sum(ns(cdom)); +fullm.mu = zeros(n,Qarity); +fullm.Sigma = zeros(n,n,Qarity); + +if ~isempty(chid) + chid_blocks = block(find_equiv_posns(chid, cdom), ns(cdom)); +end +if ~isempty(cobs) + cobs_blocks = block(find_equiv_posns(cobs, cdom), ns(cdom)); +end + +for i=1:length(supportedQs) + Q = supportedQs(i); + if ~isempty(chid) + fullm.mu(chid_blocks, Q) = fmarginal.mu(:, i); + fullm.Sigma(chid_blocks, chid_blocks, Q) = fmarginal.Sigma(:,:,i); + end + if ~isempty(cobs) + fullm.mu(cobs_blocks, Q) = cvals(:); + end +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/adjustable_CPD.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/adjustable_CPD.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,5 @@ +function p = adjustable_CPD(CPD) +% ADJUSTABLE_CPD Does this CPD have any adjustable params? (gaussian) +% p = adjustable_CPD(CPD) + +p = ~CPD.clamped_mean | ~CPD.clamped_cov | ~CPD.clamped_weights; diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/convert_CPD_to_table_hidden_ps.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/convert_CPD_to_table_hidden_ps.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,20 @@ +function T = convert_CPD_to_table_hidden_ps(CPD, self_val) +% CONVERT_CPD_TO_TABLE_HIDDEN_PS Convert a Gaussian CPD to a table +% function T = convert_CPD_to_table_hidden_ps(CPD, self_val) +% +% self_val must be a non-empty vector. +% All the parents are hidden. 
+% +% This is used by misc/convert_dbn_CPDs_to_tables + +m = CPD.mean; +C = CPD.cov; +W = CPD.weights; + +[ssz dpsize] = size(m); + +T = zeros(dpsize, 1); +for i=1:dpsize + T(i) = gaussian_prob(self_val, m(:,i), C(:,:,i)); +end + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/convert_to_pot.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/convert_to_pot.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,71 @@ +function pot = convert_to_pot(CPD, pot_type, domain, evidence) +% CONVERT_TO_POT Convert a Gaussian CPD to one or more potentials +% pot = convert_to_pot(CPD, pot_type, domain, evidence) + +sz = CPD.sizes; +ns = zeros(1, max(domain)); +ns(domain) = sz; + +odom = domain(~isemptycell(evidence(domain))); +ps = domain(1:end-1); +cps = ps(CPD.cps); +dps = ps(CPD.dps); +self = domain(end); +cdom = [cps(:)' self]; +ddom = dps; +cnodes = cdom; + +switch pot_type + case 'u', + error('gaussian utility potentials not yet supported'); + + case 'd', + T = convert_to_table(CPD, domain, evidence); + ns(odom) = 1; + pot = dpot(domain, ns(domain), T); + + case {'c','g'}, + [m, C, W] = gaussian_CPD_params_given_dps(CPD, domain, evidence); + pot = linear_gaussian_to_cpot(m, C, W, domain, ns, cnodes, evidence); + + case 'cg', + [m, C, W] = gaussian_CPD_params_given_dps(CPD, domain, evidence); + % Convert each conditional Gaussian to a canonical potential + cobs = myintersect(cdom, odom); + dobs = myintersect(ddom, odom); + ens = ns; % effective node size + ens(cobs) = 0; + ens(dobs) = 1; + dpsize = prod(ens(dps)); + can = cell(1, dpsize); + for i=1:dpsize + if isempty(W) + can{i} = linear_gaussian_to_cpot(m(:,i), C(:,:,i), [], cdom, ns, cnodes, evidence); + else + can{i} = linear_gaussian_to_cpot(m(:,i), C(:,:,i), W(:,:,i), cdom, ns, cnodes, evidence); + end + end + pot = cgpot(ddom, cdom, ens, can); + + case 'scg', + [m, C, W] = gaussian_CPD_params_given_dps(CPD, domain, evidence); + cobs = myintersect(cdom, odom); + dobs = myintersect(ddom, odom); + ens = ns; % effective node size + ens(cobs) = 0; + ens(dobs) = 1; + dpsize = prod(ens(dps)); + cpsize = size(W, 2); % cts parents size + ss = size(m, 1); % self size + cheaddom = self; + ctaildom = cps(:)'; + pot_array = cell(1, dpsize); + for i=1:dpsize + pot_array{i} = scgcpot(ss, cpsize, 1, m(:,i), W(:,:,i), C(:,:,i)); + end + pot = scgpot(ddom, cheaddom, ctaildom, ens, pot_array); + + otherwise, + error(['unrecognized pot_type' pot_type]) +end + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/convert_to_table.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/convert_to_table.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,38 @@ +function T = convert_to_table(CPD, domain, evidence) +% CONVERT_TO_TABLE Convert a Gaussian CPD to a table +% T = convert_to_table(CPD, domain, evidence) + + +sz = CPD.sizes; +ns = zeros(1, max(domain)); +ns(domain) = sz; + +odom = domain(~isemptycell(evidence(domain))); +ps = domain(1:end-1); +cps = ps(CPD.cps); +dps = ps(CPD.dps); +self = domain(end); +cdom = [cps(:)' self]; +ddom = dps; +cnodes = cdom; + +[m, C, W] = gaussian_CPD_params_given_dps(CPD, domain, evidence); + + +ns(odom) = 1; +dpsize = prod(ns(dps)); +self = domain(end); +assert(myismember(self, odom)); +self_val = evidence{self}; +T = zeros(dpsize, 1); +if length(cps) > 0 + assert(~any(isemptycell(evidence(cps)))); + cps_vals = cat(1, evidence{cps}); + for i=1:dpsize + T(i) = gaussian_prob(self_val, 
m(:,i) + W(:,:,i)*cps_vals, C(:,:,i)); + end +else + for i=1:dpsize + T(i) = gaussian_prob(self_val, m(:,i), C(:,:,i)); + end +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/display.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/display.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,4 @@ +function display(CPD) + +disp('gaussian_CPD object'); +disp(struct(CPD)); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/gaussian_CPD.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/gaussian_CPD.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,161 @@ +function CPD = gaussian_CPD(bnet, self, varargin) +% GAUSSIAN_CPD Make a conditional linear Gaussian distrib. +% +% CPD = gaussian_CPD(bnet, node, ...) will create a CPD with random parameters, +% where node is the number of a node in this equivalence class. + +% To define this CPD precisely, call the continuous (cts) parents (if any) X, +% the discrete parents (if any) Q, and this node Y. Then the distribution on Y is: +% - no parents: Y ~ N(mu, Sigma) +% - cts parents : Y|X=x ~ N(mu + W x, Sigma) +% - discrete parents: Y|Q=i ~ N(mu(i), Sigma(i)) +% - cts and discrete parents: Y|X=x,Q=i ~ N(mu(i) + W(i) x, Sigma(i)) +% +% The list below gives optional arguments [default value in brackets]. +% (Let ns(i) be the size of node i, X = ns(X), Y = ns(Y) and Q = prod(ns(Q)).) +% Parameters will be reshaped to the right size if necessary. +% +% mean - mu(:,i) is the mean given Q=i [ randn(Y,Q) ] +% cov - Sigma(:,:,i) is the covariance given Q=i [ repmat(100*eye(Y,Y), [1 1 Q]) ] +% weights - W(:,:,i) is the regression matrix given Q=i [ randn(Y,X,Q) ] +% cov_type - if 'diag', Sigma(:,:,i) is diagonal [ 'full' ] +% tied_cov - if 1, we constrain Sigma(:,:,i) to be the same for all i [0] +% clamp_mean - if 1, we do not adjust mu(:,i) during learning [0] +% clamp_cov - if 1, we do not adjust Sigma(:,:,i) during learning [0] +% clamp_weights - if 1, we do not adjust W(:,:,i) during learning [0] +% cov_prior_weight - weight given to I prior for estimating Sigma [0.01] +% cov_prior_entropic - if 1, we also use an entropic prior for Sigma [0] +% +% e.g., CPD = gaussian_CPD(bnet, i, 'mean', [0; 0], 'clamp_mean', 1) + +if nargin==0 + % This occurs if we are trying to load an object from a file. + CPD = init_fields; + clamp = 0; + CPD = class(CPD, 'gaussian_CPD', generic_CPD(clamp)); + return; +elseif isa(bnet, 'gaussian_CPD') + % This might occur if we are copying an object. 
+ CPD = bnet; + return; +end +CPD = init_fields; + +CPD = class(CPD, 'gaussian_CPD', generic_CPD(0)); + +args = varargin; +ns = bnet.node_sizes; +ps = parents(bnet.dag, self); +dps = myintersect(ps, bnet.dnodes); +cps = myintersect(ps, bnet.cnodes); +fam_sz = ns([ps self]); + +CPD.self = self; +CPD.sizes = fam_sz; + +% Figure out which (if any) of the parents are discrete, and which cts, and how big they are +% dps = discrete parents, cps = cts parents +CPD.cps = find_equiv_posns(cps, ps); % cts parent index +CPD.dps = find_equiv_posns(dps, ps); +ss = fam_sz(end); +psz = fam_sz(1:end-1); +dpsz = prod(psz(CPD.dps)); +cpsz = sum(psz(CPD.cps)); + +% set default params +CPD.mean = randn(ss, dpsz); +CPD.cov = 100*repmat(eye(ss), [1 1 dpsz]); +CPD.weights = randn(ss, cpsz, dpsz); +CPD.cov_type = 'full'; +CPD.tied_cov = 0; +CPD.clamped_mean = 0; +CPD.clamped_cov = 0; +CPD.clamped_weights = 0; +CPD.cov_prior_weight = 0.01; +CPD.cov_prior_entropic = 0; +nargs = length(args); +if nargs > 0 + CPD = set_fields(CPD, args{:}); +end + +% Make sure the matrices have 1 dimension per discrete parent. +% Bug fix due to Xuejing Sun 3/6/01 +CPD.mean = myreshape(CPD.mean, [ss ns(dps)]); +CPD.cov = myreshape(CPD.cov, [ss ss ns(dps)]); +CPD.weights = myreshape(CPD.weights, [ss cpsz ns(dps)]); + +% Precompute indices into block structured matrices +% to speed up CPD_to_lambda_msg and CPD_to_pi +cpsizes = CPD.sizes(CPD.cps); +CPD.cps_block_ndx = cell(1, length(cps)); +for i=1:length(cps) + CPD.cps_block_ndx{i} = block(i, cpsizes); +end + +%%%%%%%%%%% +% Learning stuff + +% expected sufficient statistics +CPD.Wsum = zeros(dpsz,1); +CPD.WYsum = zeros(ss, dpsz); +CPD.WXsum = zeros(cpsz, dpsz); +CPD.WYYsum = zeros(ss, ss, dpsz); +CPD.WXXsum = zeros(cpsz, cpsz, dpsz); +CPD.WXYsum = zeros(cpsz, ss, dpsz); + +% For BIC +CPD.nsamples = 0; +switch CPD.cov_type + case 'full', + % since symmetric + %ncov_params = ss*(ss-1)/2; + ncov_params = ss*(ss+1)/2; + case 'diag', + ncov_params = ss; + otherwise + error(['unrecognized cov_type ' cov_type]); +end +% params = weights + mean + cov +if CPD.tied_cov + CPD.nparams = ss*cpsz*dpsz + ss*dpsz + ncov_params; +else + CPD.nparams = ss*cpsz*dpsz + ss*dpsz + dpsz*ncov_params; +end + +% for speeding up maximize_params +CPD.useC = exist('rep_mult'); + +clamped = CPD.clamped_mean & CPD.clamped_cov & CPD.clamped_weights; +CPD = set_clamped(CPD, clamped); + +%%%%%%%%%%% + +function CPD = init_fields() +% This ensures we define the fields in the same order +% no matter whether we load an object from a file, +% or create it from scratch. (Matlab requires this.) 
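+% A hedged construction sketch (the dag, node sizes and mk_bnet call below are
+% illustrative assumptions, not part of this file): a scalar cts node X with a
+% scalar cts child Y, so that Y|X=x ~ N(mu + w*x, sigma):
+%   dag = zeros(2); dag(1,2) = 1;                 % X -> Y
+%   bnet = mk_bnet(dag, [1 1], 'discrete', []);   % both nodes continuous
+%   bnet.CPD{1} = gaussian_CPD(bnet, 1, 'mean', 0, 'cov', 1);
+%   bnet.CPD{2} = gaussian_CPD(bnet, 2, 'mean', 0, 'cov', 1, 'weights', 0.5);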
+ +CPD.self = []; +CPD.sizes = []; +CPD.cps = []; +CPD.dps = []; +CPD.mean = []; +CPD.cov = []; +CPD.weights = []; +CPD.clamped_mean = []; +CPD.clamped_cov = []; +CPD.clamped_weights = []; +CPD.cov_type = []; +CPD.tied_cov = []; +CPD.Wsum = []; +CPD.WYsum = []; +CPD.WXsum = []; +CPD.WYYsum = []; +CPD.WXXsum = []; +CPD.WXYsum = []; +CPD.nsamples = []; +CPD.nparams = []; +CPD.cov_prior_weight = []; +CPD.cov_prior_entropic = []; +CPD.useC = []; +CPD.cps_block_ndx = []; diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/gaussian_CPD_params_given_dps.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/gaussian_CPD_params_given_dps.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,28 @@ +function [m, C, W] = gaussian_CPD_params_given_dps(CPD, domain, evidence) +% GAUSSIAN_CPD_PARAMS_GIVEN_EV_ON_DPS Extract parameters given evidence on all discrete parents +% function [m, C, W] = gaussian_CPD_params_given_ev_on_dps(CPD, domain, evidence) + +ps = domain(1:end-1); +dps = ps(CPD.dps); +if isempty(dps) + m = CPD.mean; + C = CPD.cov; + W = CPD.weights; +else + odom = domain(~isemptycell(evidence(domain))); + dops = myintersect(dps, odom); + dpvals = cat(1, evidence{dops}); + if length(dops) == length(dps) + dpsizes = CPD.sizes(CPD.dps); + dpval = subv2ind(dpsizes, dpvals(:)'); + m = CPD.mean(:, dpval); + C = CPD.cov(:, :, dpval); + W = CPD.weights(:, :, dpval); + else + map = find_equiv_posns(dops, dps); + index = mk_multi_index(length(dps), map, dpvals); + m = CPD.mean(:, index{:}); + C = CPD.cov(:, :, index{:}); + W = CPD.weights(:, :, index{:}); + end +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/get_field.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/get_field.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,19 @@ +function val = get_params(CPD, name) +% GET_PARAMS Get the parameters (fields) for a gaussian_CPD object +% val = get_params(CPD, name) +% +% The following fields can be accessed +% +% mean - mu(:,i) is the mean given Q=i +% cov - Sigma(:,:,i) is the covariance given Q=i +% weights - W(:,:,i) is the regression matrix given Q=i +% +% e.g., mean = get_params(CPD, 'mean') + +switch name + case 'mean', val = CPD.mean; + case 'cov', val = CPD.cov; + case 'weights', val = CPD.weights; + otherwise, + error(['invalid argument name ' name]); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/learn_params.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/learn_params.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,31 @@ +function CPD = learn_params(CPD, fam, data, ns, cnodes) +%function CPD = learn_params(CPD, fam, data, ns, cnodes) +% LEARN_PARAMS Compute the maximum likelihood estimate of the params of a gaussian CPD given complete data +% CPD = learn_params(CPD, fam, data, ns, cnodes) +% +% data(i,m) is the value of node i in case m (can be cell array). +% We assume this node has a maximize_params method. 
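+% A minimal usage sketch (the family, node sizes and data below are
+% illustrative assumptions): learn the CPD of node 2, whose cts parent is
+% node 1, from fully observed cases:
+%   fam = [1 2];                       % parents followed by self
+%   ns = [1 1];                        % both nodes are scalar
+%   cnodes = [1 2];                    % both nodes are continuous
+%   data = num2cell(randn(2, 100));    % data(i,m) = value of node i in case m
+%   CPD = learn_params(CPD, fam, data, ns, cnodes);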
+ +ncases = size(data, 2); +CPD = reset_ess(CPD); +% make a fully observed joint distribution over the family +fmarginal.domain = fam; +fmarginal.T = 1; +fmarginal.mu = []; +fmarginal.Sigma = []; +if ~iscell(data) + cases = num2cell(data); +else + cases = data; +end +hidden_bitv = zeros(1, max(fam)); +for m=1:ncases + % specify (as a bit vector) which elements in the family domain are hidden + hidden_bitv = zeros(1, max(fmarginal.domain)); + ev = cases(:,m); + hidden_bitv(find(isempty(ev)))=1; + CPD = update_ess(CPD, fmarginal, ev, ns, cnodes, hidden_bitv); +end +CPD = maximize_params(CPD); + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/log_prob_node.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/log_prob_node.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,49 @@ +function L = log_prob_node(CPD, self_ev, pev) +% LOG_PROB_NODE Compute prod_m log P(x(i,m)| x(pi_i,m), theta_i) for node i (gaussian) +% L = log_prob_node(CPD, self_ev, pev) +% +% self_ev(m) is the evidence on this node in case m. +% pev(i,m) is the evidence on the i'th parent in case m (if there are any parents). +% (These may also be cell arrays.) + +if iscell(self_ev), usecell = 1; else usecell = 0; end + +use_log = 1; +ncases = length(self_ev); +nparents = length(CPD.sizes)-1; +assert(ncases == size(pev, 2)); + +if ncases == 0 + L = 0; + return; +end + +L = 0; +for m=1:ncases + if isempty(CPD.dps) + i = 1; + else + if usecell + dpvals = cat(1, pev{CPD.dps, m}); + else + dpvals = pev(CPD.dps, m); + end + i = subv2ind(CPD.sizes(CPD.dps), dpvals(:)'); + end + if usecell + y = self_ev{m}; + else + y = self_ev(m); + end + if length(CPD.cps) == 0 + L = L + gaussian_prob(y, CPD.mean(:,i), CPD.cov(:,:,i), use_log); + else + if usecell + x = cat(1, pev{CPD.cps, m}); + else + x = pev(CPD.cps, m); + end + L = L + gaussian_prob(y, CPD.mean(:,i) + CPD.weights(:,:,i)*x, CPD.cov(:,:,i), use_log); + end +end + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/maximize_params.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/maximize_params.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,68 @@ +function CPD = maximize_params(CPD, temp) +% MAXIMIZE_PARAMS Set the params of a CPD to their ML values (Gaussian) +% CPD = maximize_params(CPD, temperature) +% +% Temperature is currently ignored. + +if ~adjustable_CPD(CPD), return; end + + +if CPD.clamped_mean + cl_mean = CPD.mean; +else + cl_mean = []; +end + +if CPD.clamped_cov + cl_cov = CPD.cov; +else + cl_cov = []; +end + +if CPD.clamped_weights + cl_weights = CPD.weights; +else + cl_weights = []; +end + +[ssz psz Q] = size(CPD.weights); + +[ss cpsz dpsz] = size(CPD.weights); % ss = self size = ssz +if cpsz > CPD.nsamples + fprintf('gaussian_CPD/maximize_params: warning: input dimension (%d) > nsamples (%d)\n', ... + cpsz, CPD.nsamples); +end + +prior = repmat(CPD.cov_prior_weight*eye(ssz,ssz), [1 1 Q]); + + +[CPD.mean, CPD.cov, CPD.weights] = ... + clg_Mstep(CPD.Wsum, CPD.WYsum, CPD.WYYsum, [], CPD.WXsum, CPD.WXXsum, CPD.WXYsum, ... + 'cov_type', CPD.cov_type, 'clamped_mean', cl_mean, ... + 'clamped_cov', cl_cov, 'clamped_weights', cl_weights, ... + 'tied_cov', CPD.tied_cov, ... 
+ 'cov_prior', prior); + +if 0 +CPD.mean = reshape(CPD.mean, [ss dpsz]); +CPD.cov = reshape(CPD.cov, [ss ss dpsz]); +CPD.weights = reshape(CPD.weights, [ss cpsz dpsz]); +end + +% Bug fix 11 May 2003 KPM +% clg_Mstep collapses all discrete parents into one mega-node +% but convert_to_CPT needs access to each parent separately +sz = CPD.sizes; +ss = sz(end); + +% Bug fix KPM 20 May 2003: +cpsz = sum(sz(CPD.cps)); +%if isempty(CPD.cps) +% cpsz = 0; +%else +% cpsz = sz(CPD.cps); +%end +dpsz = sz(CPD.dps); +CPD.mean = myreshape(CPD.mean, [ss dpsz]); +CPD.cov = myreshape(CPD.cov, [ss ss dpsz]); +CPD.weights = myreshape(CPD.weights, [ss cpsz dpsz]); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/maximize_params_debug.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/maximize_params_debug.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,189 @@ +function CPD = maximize_params(CPD, temp) +% MAXIMIZE_PARAMS Set the params of a CPD to their ML values (Gaussian) +% CPD = maximize_params(CPD, temperature) +% +% Temperature is currently ignored. + +if ~adjustable_CPD(CPD), return; end + +CPD1 = struct(new_maximize_params(CPD)); +CPD2 = struct(old_maximize_params(CPD)); +assert(approxeq(CPD1.mean, CPD2.mean)) +assert(approxeq(CPD1.cov, CPD2.cov)) +assert(approxeq(CPD1.weights, CPD2.weights)) + +CPD = new_maximize_params(CPD); + +%%%%%%% +function CPD = new_maximize_params(CPD) + +if CPD.clamped_mean + cl_mean = CPD.mean; +else + cl_mean = []; +end + +if CPD.clamped_cov + cl_cov = CPD.cov; +else + cl_cov = []; +end + +if CPD.clamped_weights + cl_weights = CPD.weights; +else + cl_weights = []; +end + +[ssz psz Q] = size(CPD.weights); + +prior = repmat(CPD.cov_prior_weight*eye(ssz,ssz), [1 1 Q]); +[CPD.mean, CPD.cov, CPD.weights] = ... + Mstep_clg('w', CPD.Wsum, 'YY', CPD.WYYsum, 'Y', CPD.WYsum, 'YTY', [], ... + 'XX', CPD.WXXsum, 'XY', CPD.WXYsum, 'X', CPD.WXsum, ... + 'cov_type', CPD.cov_type, 'clamped_mean', cl_mean, ... + 'clamped_cov', cl_cov, 'clamped_weights', cl_weights, ... + 'tied_cov', CPD.tied_cov, ... + 'cov_prior', prior); + + +%%%%%%%%%%% + +function CPD = old_maximize_params(CPD) + + +if ~adjustable_CPD(CPD), return; end + +%assert(approxeq(CPD.nsamples, sum(CPD.Wsum))); +assert(~any(isnan(CPD.WXXsum))) +assert(~any(isnan(CPD.WXYsum))) +assert(~any(isnan(CPD.WYYsum))) + +[self_size cpsize dpsize] = size(CPD.weights); + +% Append 1s to the parents, and derive the corresponding cross products. +% This is used when estimate the means and weights simultaneosuly, +% and when estimatting Sigma. +% Let x2 = [x 1]' +XY = zeros(cpsize+1, self_size, dpsize); % XY(:,:,i) = sum_l w(l,i) x2(l) y(l)' +XX = zeros(cpsize+1, cpsize+1, dpsize); % XX(:,:,i) = sum_l w(l,i) x2(l) x2(l)' +YY = zeros(self_size, self_size, dpsize); % YY(:,:,i) = sum_l w(l,i) y(l) y(l)' +for i=1:dpsize + XY(:,:,i) = [CPD.WXYsum(:,:,i) % X*Y + CPD.WYsum(:,i)']; % 1*Y + % [x * [x' 1] = [xx' x + % 1] x' 1] + XX(:,:,i) = [CPD.WXXsum(:,:,i) CPD.WXsum(:,i); + CPD.WXsum(:,i)' CPD.Wsum(i)]; + YY(:,:,i) = CPD.WYYsum(:,:,i); +end + +w = CPD.Wsum(:); +% Set any zeros to one before dividing +% This is valid because w(i)=0 => WYsum(:,i)=0, etc +w = w + (w==0); + +if CPD.clamped_mean + % Estimating B2 and then setting the last column (the mean) to the clamped mean is *not* equivalent + % to estimating B and then adding the clamped_mean to the last column. 
+ if ~CPD.clamped_weights + B = zeros(self_size, cpsize, dpsize); + for i=1:dpsize + if det(CPD.WXXsum(:,:,i))==0 + B(:,:,i) = 0; + else + % Eqn 9 in table 2 of TR + %B(:,:,i) = CPD.WXYsum(:,:,i)' * inv(CPD.WXXsum(:,:,i)); + B(:,:,i) = (CPD.WXXsum(:,:,i) \ CPD.WXYsum(:,:,i))'; + end + end + %CPD.weights = reshape(B, [self_size cpsize dpsize]); + CPD.weights = B; + end +elseif CPD.clamped_weights % KPM 1/25/02 + if ~CPD.clamped_mean % ML estimate is just sample mean of the residuals + for i=1:dpsize + CPD.mean(:,i) = (CPD.WYsum(:,i) - CPD.weights(:,:,i) * CPD.WXsum(:,i)) / w(i); + end + end +else % nothing is clamped, so estimate mean and weights simultaneously + B2 = zeros(self_size, cpsize+1, dpsize); + for i=1:dpsize + if det(XX(:,:,i))==0 % fix by U. Sondhauss 6/27/99 + B2(:,:,i)=0; + else + % Eqn 9 in table 2 of TR + %B2(:,:,i) = XY(:,:,i)' * inv(XX(:,:,i)); + B2(:,:,i) = (XX(:,:,i) \ XY(:,:,i))'; + end + CPD.mean(:,i) = B2(:,cpsize+1,i); + CPD.weights(:,:,i) = B2(:,1:cpsize,i); + end +end + +% Let B2 = [W mu] +if cpsize>0 + B2(:,1:cpsize,:) = reshape(CPD.weights, [self_size cpsize dpsize]); +end +B2(:,cpsize+1,:) = reshape(CPD.mean, [self_size dpsize]); + +% To avoid singular covariance matrices, +% we use the regularization method suggested in "A Quasi-Bayesian approach to estimating +% parameters for mixtures of normal distributions", Hamilton 91. +% If the ML estimate is Sigma = M/N, the MAP estimate is (M+gamma*I) / (N+gamma), +% where gamma >=0 is a smoothing parameter (equivalent sample size of I prior) + +gamma = CPD.cov_prior_weight; + +if ~CPD.clamped_cov + if CPD.cov_prior_entropic % eqn 12 of Brand AI/Stat 99 + Z = 1-temp; + % When temp > 1, Z is negative, so we are dividing by a smaller + % number, ie. increasing the variance. + else + Z = 0; + end + if CPD.tied_cov + S = zeros(self_size, self_size); + % Eqn 2 from table 2 in TR + for i=1:dpsize + S = S + (YY(:,:,i) - B2(:,:,i)*XY(:,:,i)); + end + %denom = CPD.nsamples + gamma + Z; + denom = CPD.nsamples + Z; + S = (S + gamma*eye(self_size)) / denom; + if strcmp(CPD.cov_type, 'diag') + S = diag(diag(S)); + end + CPD.cov = repmat(S, [1 1 dpsize]); + else + for i=1:dpsize + % Eqn 1 from table 2 in TR + S = YY(:,:,i) - B2(:,:,i)*XY(:,:,i); + %denom = w(i) + gamma + Z; + denom = w(i) + Z; + S = (S + gamma*eye(self_size)) / denom; + CPD.cov(:,:,i) = S; + end + if strcmp(CPD.cov_type, 'diag') + for i=1:dpsize + CPD.cov(:,:,i) = diag(diag(CPD.cov(:,:,i))); + end + end + end +end + + +check_covars = 0; +min_covar = 1e-5; +if check_covars % prevent collapsing to a point + for i=1:dpsize + if min(svd(CPD.cov(:,:,i))) < min_covar + disp(['resetting singular covariance for node ' num2str(CPD.self)]); + CPD.cov(:,:,i) = CPD.init_cov(:,:,i); + end + end +end + + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/private/CPD_to_linear_gaussian.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/private/CPD_to_linear_gaussian.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,19 @@ +function [mu, Sigma, W] = CPD_to_linear_gaussian(CPD, domain, ns, cnodes, evidence) + +ps = domain(1:end-1); +dnodes = mysetdiff(1:length(ns), cnodes); +dps = myintersect(ps, dnodes); % discrete parents + +if isempty(dps) + Q = 1; +else + assert(~any(isemptycell(evidence(dps)))); + dpvals = cat(1, evidence{dps}); + Q = subv2ind(ns(dps), dpvals(:)'); +end + +mu = CPD.mean(:,Q); +Sigma = CPD.cov(:,:,Q); +W = CPD.weights(:,:,Q); + + diff -r 000000000000 -r cc4b1211e677 
toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/private/CVS/Entries --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/private/CVS/Entries Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,2 @@ +/CPD_to_linear_gaussian.m/1.1.1.1/Wed May 29 15:59:52 2002// +D diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/private/CVS/Repository --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/private/CVS/Repository Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +FullBNT/BNT/CPDs/@gaussian_CPD/private diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/private/CVS/Root --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/private/CVS/Root Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/reset_ess.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/reset_ess.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,11 @@ +function CPD = reset_ess(CPD) +% RESET_ESS Reset the Expected Sufficient Statistics for a Gaussian CPD. +% CPD = reset_ess(CPD) + +CPD.nsamples = 0; +CPD.Wsum = zeros(size(CPD.Wsum)); +CPD.WYsum = zeros(size(CPD.WYsum)); +CPD.WYYsum = zeros(size(CPD.WYYsum)); +CPD.WXsum = zeros(size(CPD.WXsum)); +CPD.WXXsum = zeros(size(CPD.WXXsum)); +CPD.WXYsum = zeros(size(CPD.WXYsum)); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/sample_node.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/sample_node.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,22 @@ +function y = sample_node(CPD, pev) +% SAMPLE_NODE Draw a random sample from P(Xi | x(pi_i), theta_i) (gaussian) +% y = sample_node(CPD, parent_evidence) +% +% pev{i} is the value of the i'th parent (if there are any parents) +% y is the sampled value (a scalar or vector) + +if length(CPD.dps)==0 + i = 1; +else + dpvals = cat(1, pev{CPD.dps}); + i = subv2ind(CPD.sizes(CPD.dps), dpvals(:)'); +end + +if length(CPD.cps) == 0 + y = gsamp(CPD.mean(:,i), CPD.cov(:,:,i), 1); +else + pev = pev(:); + x = cat(1, pev{CPD.cps}); + y = gsamp(CPD.mean(:,i) + CPD.weights(:,:,i)*x(:), CPD.cov(:,:,i), 1); +end +y = y(:); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/set_fields.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/set_fields.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,43 @@ +function CPD = set_fields(CPD, varargin) +% SET_PARAMS Set the parameters (fields) for a gaussian_CPD object +% CPD = set_params(CPD, name/value pairs) +% +% The following optional arguments can be specified in the form of name/value pairs: +% +% mean - mu(:,i) is the mean given Q=i +% cov - Sigma(:,:,i) is the covariance given Q=i +% weights - W(:,:,i) is the regression matrix given Q=i +% cov_type - if 'diag', Sigma(:,:,i) is diagonal +% tied_cov - if 1, we constrain Sigma(:,:,i) to be the same for all i +% clamp_mean - if 1, we do not adjust mu(:,i) during learning +% clamp_cov - if 1, we do not adjust Sigma(:,:,i) during learning +% clamp_weights - if 1, we do not adjust W(:,:,i) during learning +% clamp - if 1, we do not adjust any params +% cov_prior_weight - weight given to I prior for estimating Sigma +% cov_prior_entropic - if 1, we also use an 
entropic prior for Sigma [0] +% +% e.g., CPD = set_params(CPD, 'mean', [0;0]) + +args = varargin; +nargs = length(args); +for i=1:2:nargs + switch args{i}, + case 'mean', CPD.mean = args{i+1}; + case 'cov', CPD.cov = args{i+1}; + case 'weights', CPD.weights = args{i+1}; + case 'cov_type', CPD.cov_type = args{i+1}; + %case 'tied_cov', CPD.tied_cov = strcmp(args{i+1}, 'yes'); + case 'tied_cov', CPD.tied_cov = args{i+1}; + case 'clamp_mean', CPD.clamped_mean = args{i+1}; + case 'clamp_cov', CPD.clamped_cov = args{i+1}; + case 'clamp_weights', CPD.clamped_weights = args{i+1}; + case 'clamp', clamp = args{i+1}; + CPD.clamped_mean = clamp; + CPD.clamped_cov = clamp; + CPD.clamped_weights = clamp; + case 'cov_prior_weight', CPD.cov_prior_weight = args{i+1}; + case 'cov_prior_entropic', CPD.cov_prior_entropic = args{i+1}; + otherwise, + error(['invalid argument name ' args{i}]); + end +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/update_ess.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gaussian_CPD/update_ess.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,88 @@ +function CPD = update_ess(CPD, fmarginal, evidence, ns, cnodes, hidden_bitv) +% UPDATE_ESS Update the Expected Sufficient Statistics of a Gaussian node +% function CPD = update_ess(CPD, fmarginal, evidence, ns, cnodes, hidden_bitv) + +%if nargin < 6 +% hidden_bitv = zeros(1, max(fmarginal.domain)); +% hidden_bitv(find(isempty(evidence)))=1; +%end + +dom = fmarginal.domain; +self = dom(end); +ps = dom(1:end-1); +cps = myintersect(ps, cnodes); +dps = mysetdiff(ps, cps); + +CPD.nsamples = CPD.nsamples + 1; +[ss cpsz dpsz] = size(CPD.weights); % ss = self size +[ss dpsz] = size(CPD.mean); + +% Let X be the cts parent (if any), Y be the cts child (self). + +if ~hidden_bitv(self) & ~any(hidden_bitv(cps)) & all(hidden_bitv(dps)) + % Speedup for the common case that all cts nodes are observed, all discrete nodes are hidden + % Since X and Y are observed, SYY = 0, SXX = 0, SXY = 0 + % Since discrete parents are hidden, we do not need to add evidence to w. + w = fmarginal.T(:); + CPD.Wsum = CPD.Wsum + w; + y = evidence{self}; + Cyy = y*y'; + if ~CPD.useC + WY = repmat(w(:)',ss,1); % WY(y,i) = w(i) + WYY = repmat(reshape(WY, [ss 1 dpsz]), [1 ss 1]); % WYY(y,y',i) = w(i) + %CPD.WYsum = CPD.WYsum + WY .* repmat(y(:), 1, dpsz); + CPD.WYsum = CPD.WYsum + y(:) * w(:)'; + CPD.WYYsum = CPD.WYYsum + WYY .* repmat(reshape(Cyy, [ss ss 1]), [1 1 dpsz]); + else + W = w(:)'; + W2 = reshape(W, [1 1 dpsz]); + CPD.WYsum = CPD.WYsum + rep_mult(W, y(:), size(CPD.WYsum)); + CPD.WYYsum = CPD.WYYsum + rep_mult(W2, Cyy, size(CPD.WYYsum)); + end + if cpsz > 0 % X exists + x = cat(1, evidence{cps}); x = x(:); + Cxx = x*x'; + Cxy = x*y'; + WX = repmat(w(:)',cpsz,1); % WX(x,i) = w(i) + WXX = repmat(reshape(WX, [cpsz 1 dpsz]), [1 cpsz 1]); % WXX(x,x',i) = w(i) + WXY = repmat(reshape(WX, [cpsz 1 dpsz]), [1 ss 1]); % WXY(x,y,i) = w(i) + if ~CPD.useC + CPD.WXsum = CPD.WXsum + WX .* repmat(x(:), 1, dpsz); + CPD.WXXsum = CPD.WXXsum + WXX .* repmat(reshape(Cxx, [cpsz cpsz 1]), [1 1 dpsz]); + CPD.WXYsum = CPD.WXYsum + WXY .* repmat(reshape(Cxy, [cpsz ss 1]), [1 1 dpsz]); + else + CPD.WXsum = CPD.WXsum + rep_mult(W, x(:), size(CPD.WXsum)); + CPD.WXXsum = CPD.WXXsum + rep_mult(W2, Cxx, size(CPD.WXXsum)); + CPD.WXYsum = CPD.WXYsum + rep_mult(W2, Cxy, size(CPD.WXYsum)); + end + end + return; +end + +% general (non-vectorized) case +fullm = add_evidence_to_gmarginal(fmarginal, evidence, ns, cnodes); % slow! 
+ +if dpsz == 1 % no discrete parents + w = 1; +else + w = fullm.T(:); +end + +CPD.Wsum = CPD.Wsum + w; +xi = 1:cpsz; +yi = (cpsz+1):(cpsz+ss); +for i=1:dpsz + muY = fullm.mu(yi, i); + SYY = fullm.Sigma(yi, yi, i); + CPD.WYsum(:,i) = CPD.WYsum(:,i) + w(i)*muY; + CPD.WYYsum(:,:,i) = CPD.WYYsum(:,:,i) + w(i)*(SYY + muY*muY'); % E[X Y] = Cov[X,Y] + E[X] E[Y] + if cpsz > 0 + muX = fullm.mu(xi, i); + SXX = fullm.Sigma(xi, xi, i); + SXY = fullm.Sigma(xi, yi, i); + CPD.WXsum(:,i) = CPD.WXsum(:,i) + w(i)*muX; + CPD.WXXsum(:,:,i) = CPD.WXXsum(:,:,i) + w(i)*(SXX + muX*muX'); + CPD.WXYsum(:,:,i) = CPD.WXYsum(:,:,i) + w(i)*(SXY + muX*muY'); + end +end + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/CVS/Entries --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/CVS/Entries Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,8 @@ +/README/1.1.1.1/Wed May 29 15:59:52 2002// +/adjustable_CPD.m/1.1.1.1/Wed May 29 15:59:52 2002// +/display.m/1.1.1.1/Wed May 29 15:59:52 2002// +/generic_CPD.m/1.1.1.1/Wed May 29 15:59:52 2002// +/learn_params.m/1.1.1.1/Thu Jun 10 01:53:20 2004// +/log_prior.m/1.1.1.1/Wed May 29 15:59:52 2002// +/set_clamped.m/1.1.1.1/Wed May 29 15:59:52 2002// +D diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/CVS/Entries.Log --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/CVS/Entries.Log Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +A D/Old//// diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/CVS/Repository --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/CVS/Repository Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +FullBNT/BNT/CPDs/@generic_CPD diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/CVS/Root --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/CVS/Root Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/Old/BIC_score_CPD.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/Old/BIC_score_CPD.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,26 @@ +function score = BIC_score_CPD(CPD, fam, data, ns, cnodes) +% BIC_score_CPD Compute the BIC score of a generic CPD +% score = BIC_score_CPD(CPD, fam, data, ns, cnodes) +% +% We assume this node has a maximize_params method + +ncases = size(data, 2); +CPD = reset_ess(CPD); +% make a fully observed joint distribution over the family +fmarginal.domain = fam; +fmarginal.T = 1; +fmarginal.mu = []; +fmarginal.Sigma = []; +if ~iscell(data) + cases = num2cell(data); +else + cases = data; +end +for m=1:ncases + CPD = update_ess(CPD, fmarginal, cases(:,m), ns, cnodes); +end +CPD = maximize_params(CPD); +self = fam(end); +ps = fam(1:end-1); +L = log_prob_node(CPD, cases(self,:), cases(ps,:)); +score = L - 0.5*CPD.nparams*log(ncases); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/Old/CPD_to_dpots.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/Old/CPD_to_dpots.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,16 @@ +function pots = CPD_to_dpots(CPD, domain, ns, cnodes, evidence) +% CPD_TO_DPOTS Convert the CPD to several discrete potentials, for different instantiations (generic) +% pots = 
CPD_to_dpots(CPD, domain, ns, cnodes, evidence) +% +% domain(:,i) is the domain of the i'th instantiation of CPD. +% node_sizes(i) is the size of node i. +% cnodes = all the cts nodes +% evidence{i} is the evidence on the i'th node. +% +% This just calls CPD_to_dpot for each domain. + +nCPDs = size(domain,2); +pots = cell(1,nCPDs); +for i=1:nCPDs + pots{i} = CPD_to_dpot(CPD, domain(:,i), ns, cnodes, evidence); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/Old/CVS/Entries --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/Old/CVS/Entries Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,3 @@ +/BIC_score_CPD.m/1.1.1.1/Wed May 29 15:59:52 2002// +/CPD_to_dpots.m/1.1.1.1/Wed May 29 15:59:52 2002// +D diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/Old/CVS/Repository --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/Old/CVS/Repository Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +FullBNT/BNT/CPDs/@generic_CPD/Old diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/Old/CVS/Root --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/Old/CVS/Root Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/README --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/README Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,2 @@ +A generic CPD implements general purpose functions like 'display', +that subtypes can inherit. diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/adjustable_CPD.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/adjustable_CPD.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,5 @@ +function p = adjustable_CPD(CPD) +% ADJUSTABLE_CPD Does this CPD have any adjustable params? (generic) +% p = adjustable_CPD(CPD) + +p = ~CPD.clamped; diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/display.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/display.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,3 @@ +function display(CPD) + +disp(struct(CPD)); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/generic_CPD.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/generic_CPD.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,8 @@ +function CPD = generic_CPD(clamped) +% GENERIC_CPD Virtual constructor for generic CPD +% CPD = discrete_CPD(clamped) + +if nargin < 1, clamped = 0; end + +CPD.clamped = clamped; +CPD = class(CPD, 'generic_CPD'); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/learn_params.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/learn_params.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,32 @@ +function CPD = learn_params(CPD, fam, data, ns, cnodes) +% LEARN_PARAMS Compute the maximum likelihood estimate of the params of a generic CPD given complete data +% CPD = learn_params(CPD, fam, data, ns, cnodes) +% +% data(i,m) is the value of node i in case m (can be cell array). +% We assume this node has a maximize_params method. 
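+% The generic implementation below treats each fully observed case as a
+% degenerate family marginal (T = 1, empty mu/Sigma), feeds it to the
+% subtype's update_ess, and finally calls maximize_params; any CPD class that
+% provides reset_ess, update_ess and maximize_params therefore inherits ML
+% learning from complete data.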
+ +%error('no longer supported') % KPM 1 Feb 03 + +if 1 +ncases = size(data, 2); +CPD = reset_ess(CPD); +% make a fully observed joint distribution over the family +fmarginal.domain = fam; +fmarginal.T = 1; +fmarginal.mu = []; +fmarginal.Sigma = []; +if ~iscell(data) + cases = num2cell(data); +else + cases = data; +end +hidden_bitv = zeros(1, max(fam)); +for m=1:ncases + % specify (as a bit vector) which elements in the family domain are hidden + hidden_bitv = zeros(1, max(fmarginal.domain)); + ev = cases(:,m); + hidden_bitv(find(isempty(evidence)))=1; + CPD = update_ess(CPD, fmarginal, ev, ns, cnodes, hidden_bitv); +end +CPD = maximize_params(CPD); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/log_prior.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/log_prior.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,5 @@ +function L = log_prior(CPD) +% LOG_PRIOR Return log P(theta) for a generic CPD - we return 0 +% L = log_prior(CPD) + +L = 0; diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/set_clamped.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@generic_CPD/set_clamped.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,3 @@ +function CPD = set_clamped(CPD, bit) + +CPD.clamped = bit; diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/CPD_to_lambda_msg.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/CPD_to_lambda_msg.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,62 @@ +function lam_msg = CPD_to_lambda_msg(CPD, msg_type, n, ps, msg, p, evidence) +% CPD_TO_LAMBDA_MSG Compute lambda message (gmux) +% lam_msg = compute_lambda_msg(CPD, msg_type, n, ps, msg, p, evidence) +% Pearl p183 eq 4.52 + +% Let Y be this node, X1..Xn be the cts parents and M the discrete switch node. +% e.g., for n=3, M=1 +% +% X1 X2 X3 M +% \ +% \ +% Y +% +% So the only case in which we send an informative message is if p=1=M. +% To the other cts parents, we send the "know nothing" message. + +switch msg_type + case 'd', + error('gaussian_CPD can''t create discrete msgs') + case 'g', + cps = ps(CPD.cps); + cpsizes = CPD.sizes(CPD.cps); + self_size = CPD.sizes(end); + i = find_equiv_posns(p, cps); % p is n's i'th cts parent + psz = cpsizes(i); + dps = ps(CPD.dps); + M = evidence{dps}; + if isempty(M) + error('gmux node must have observed discrete parent') + end + P = msg{n}.lambda.precision; + if all(P == 0) | (cps(M) ~= p) % if we know nothing, or are sending to a disconnected parent + lam_msg.precision = zeros(psz, psz); + lam_msg.info_state = zeros(psz, 1); + return; + end + % We are sending a message to the only effectively connected parent. + % There are no other incoming pi messages. 
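+    % Sketch of the computation below (Pearl eq. 4.52, with Bi = CPD.weights(:,:,M)):
+    %   Sigma_lambda = inv(P)   (or the zero matrix if Y is observed exactly),
+    %   C = inv(Sigma_lambda + BSigma),
+    %   lam_msg.precision  = Bi' * C * Bi,
+    %   lam_msg.info_state = Bi' * C * (mu_lambda - Bmu).
+    % The second branch obtains the same quantities via the matrix inversion
+    % lemma when P itself cannot be inverted.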
+ Bmu = CPD.mean(:,M); + BSigma = CPD.cov(:,:,M); + Bi = CPD.weights(:,:,M); + if (det(P) > 0) | isinf(P) + if isinf(P) % Y is observed + Sigma_lambda = zeros(self_size, self_size); % infinite precision => 0 variance + mu_lambda = msg{n}.lambda.mu; % observed_value; + else + Sigma_lambda = inv(P); + mu_lambda = Sigma_lambda * msg{n}.lambda.info_state; + end + C = inv(Sigma_lambda + BSigma); + lam_msg.precision = Bi' * C * Bi; + lam_msg.info_state = Bi' * C * (mu_lambda - Bmu); + else + % method that uses matrix inversion lemma + A = inv(P + inv(BSigma)); + C = P - P*A*P; + lam_msg.precision = Bi' * C * Bi; + D = eye(self_size) - P*A; + z = msg{n}.lambda.info_state; + lam_msg.info_state = Bi' * (D*z - D*P*Bmu); + end +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/CPD_to_pi.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/CPD_to_pi.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,18 @@ +function pi = CPD_to_pi(CPD, msg_type, n, ps, msg, evidence) +% CPD_TO_PI Compute the pi vector (gaussian) +% function pi = CPD_to_pi(CPD, msg_type, n, ps, msg, evidence) + +switch msg_type + case 'd', + error('gaussian_CPD can''t create discrete msgs') + case 'g', + dps = ps(CPD.dps); + k = evidence{dps}; + if isempty(k) + error('gmux node must have observed discrete parent') + end + m = msg{n}.pi_from_parent{k}; + B = CPD.weights(:,:,k); + pi.mu = CPD.mean(:,k) + B * m.mu; + pi.Sigma = CPD.cov(:,:,k) + B * m.Sigma * B'; +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/CVS/Entries --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/CVS/Entries Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,7 @@ +/CPD_to_lambda_msg.m/1.1.1.1/Wed May 29 15:59:52 2002// +/CPD_to_pi.m/1.1.1.1/Wed May 29 15:59:54 2002// +/convert_to_pot.m/1.1.1.1/Wed May 29 15:59:52 2002// +/display.m/1.1.1.1/Wed May 29 15:59:54 2002// +/gmux_CPD.m/1.1.1.1/Wed May 29 15:59:54 2002// +/sample_node.m/1.1.1.1/Wed May 29 15:59:54 2002// +D diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/CVS/Entries.Log --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/CVS/Entries.Log Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +A D/Old//// diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/CVS/Repository --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/CVS/Repository Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +FullBNT/BNT/CPDs/@gmux_CPD diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/CVS/Root --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/CVS/Root Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/Old/CVS/Entries --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/Old/CVS/Entries Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,2 @@ +/gmux_CPD.m/1.1.1.1/Wed May 29 15:59:54 2002// +D diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/Old/CVS/Repository --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/Old/CVS/Repository Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +FullBNT/BNT/CPDs/@gmux_CPD/Old diff -r 000000000000 -r cc4b1211e677 
toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/Old/CVS/Root --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/Old/CVS/Root Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/Old/gmux_CPD.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/Old/gmux_CPD.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,92 @@ +function CPD = gmux_CPD(bnet, self, varargin) +% GMUX_CPD Make a Gaussian multiplexer node +% +% CPD = gmux_CPD(bnet, node, ...) is used similarly to gaussian_CPD, +% except we assume there is exactly one discrete parent (call it M) +% which is used to select which cts parent to pass through to the output. +% i.e., we define P(Y=y|M=m, X1, ..., XK) = N(y | W*x(m) + mu, Sigma) +% where Y represents this node, and the Xi's are the cts parents. +% All the Xi must have the same size, and the num values for M must be K. +% +% Currently the params for this kind of CPD cannot be learned. +% +% Optional arguments [ default in brackets ] +% +% mean - mu [zeros(Y,1)] +% cov - Sigma [eye(Y,Y)] +% weights - W [ randn(Y,X) ] + +if nargin==0 + % This occurs if we are trying to load an object from a file. + CPD = init_fields; + clamp = 0; + CPD = class(CPD, 'gmux_CPD', generic_CPD(clamp)); + return; +elseif isa(bnet, 'gmux_CPD') + % This might occur if we are copying an object. + CPD = bnet; + return; +end +CPD = init_fields; + +CPD = class(CPD, 'gmux_CPD', generic_CPD(1)); + +ns = bnet.node_sizes; +ps = parents(bnet.dag, self); +dps = myintersect(ps, bnet.dnodes); +cps = myintersect(ps, bnet.cnodes); +fam_sz = ns([ps self]); + +CPD.self = self; +CPD.sizes = fam_sz; + +% Figure out which (if any) of the parents are discrete, and which cts, and how big they are +% dps = discrete parents, cps = cts parents +CPD.cps = find_equiv_posns(cps, ps); % cts parent index +CPD.dps = find_equiv_posns(dps, ps); +if length(CPD.dps) ~= 1 + error('gmux must have exactly 1 discrete parent') +end +ss = fam_sz(end); +cpsz = fam_sz(CPD.cps(1)); % in gaussian_CPD, cpsz = sum(fam_sz(CPD.cps)) +if ~all(fam_sz(CPD.cps) == cpsz) + error('all cts parents must have same size') +end +dpsz = fam_sz(CPD.dps); +if dpsz ~= length(cps) + error(['the arity of the mux node is ' num2str(dpsz) ... + ' but there are ' num2str(length(cps)) ' cts parents']); +end + +% set default params +CPD.mean = zeros(ss, 1); +CPD.cov = eye(ss); +CPD.weights = randn(ss, cpsz); + +args = varargin; +nargs = length(args); +for i=1:2:nargs + switch args{i}, + case 'mean', CPD.mean = args{i+1}; + case 'cov', CPD.cov = args{i+1}; + case 'weights', CPD.weights = args{i+1}; + otherwise, + error(['invalid argument name ' args{i}]); + end +end + +%%%%%%%%%%% + +function CPD = init_fields() +% This ensures we define the fields in the same order +% no matter whether we load an object from a file, +% or create it from scratch. (Matlab requires this.) 
+ +CPD.self = []; +CPD.sizes = []; +CPD.cps = []; +CPD.dps = []; +CPD.mean = []; +CPD.cov = []; +CPD.weights = []; + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/convert_to_pot.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/convert_to_pot.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,37 @@ +function pot = convert_to_pot(CPD, pot_type, domain, evidence) +% CONVERT_TO_POT Convert a gmux CPD to a Gaussian potential +% pot = convert_to_pot(CPD, pot_type, domain, evidence) + +switch pot_type + case {'d', 'u', 'cg', 'scg'}, + error(['can''t convert gmux to potential of type ' pot_type]) + + case {'c','g'}, + % We create a large weight matrix with zeros in all blocks corresponding + % to the non-chosen parents, since they are effectively disconnected. + % The chosen parent is determined by the value, m, of the discrete parent. + % Thus the potential is as large as the whole family. + ps = domain(1:end-1); + dps = ps(CPD.dps); % CPD.dps is an index, not a node number (because of param tying) + cps = ps(CPD.cps); + m = evidence{dps}; + if isempty(m) + error('gmux node must have observed discrete parent') + end + bs = CPD.sizes(CPD.cps); + b = block(m, bs); + sum_cpsz = sum(CPD.sizes(CPD.cps)); + selfsz = CPD.sizes(end); + W = zeros(selfsz, sum_cpsz); + W(:,b) = CPD.weights(:,:,m); + + ns = zeros(1, max(domain)); + ns(domain) = CPD.sizes; + self = domain(end); + cdom = [cps(:)' self]; + pot = linear_gaussian_to_cpot(CPD.mean(:,m), CPD.cov(:,:,m), W, domain, ns, cdom, evidence); + + otherwise, + error(['unrecognized pot_type' pot_type]) +end + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/display.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/display.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,4 @@ +function display(CPD) + +disp('gmux_CPD object'); +disp(struct(CPD)); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/gmux_CPD.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/gmux_CPD.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,95 @@ +function CPD = gmux_CPD(bnet, self, varargin) +% GMUX_CPD Make a Gaussian multiplexer node +% +% CPD = gmux_CPD(bnet, node, ...) is used similarly to gaussian_CPD, +% except we assume there is exactly one discrete parent (call it M) +% which is used to select which cts parent to pass through to the output. +% i.e., we define P(Y=y|M=m, X1, ..., XK) = N(y | W(m)*x(m) + mu(m), Sigma(m)) +% where Y represents this node, and the Xi's are the cts parents. +% All the Xi must have the same size, and the num values for M must be K. +% +% Currently the params for this kind of CPD cannot be learned. +% +% Optional arguments [ default in brackets ] +% +% mean - mu(:,i) is the mean given M=i [ zeros(Y,K) ] +% cov - Sigma(:,:,i) is the covariance given M=i [ repmat(1*eye(Y,Y), [1 1 K]) ] +% weights - W(:,:,i) is the regression matrix given M=i [ randn(Y,X,K) ] + +if nargin==0 + % This occurs if we are trying to load an object from a file. + CPD = init_fields; + clamp = 0; + CPD = class(CPD, 'gmux_CPD', generic_CPD(clamp)); + return; +elseif isa(bnet, 'gmux_CPD') + % This might occur if we are copying an object. 
+ CPD = bnet; + return; +end +CPD = init_fields; + +CPD = class(CPD, 'gmux_CPD', generic_CPD(1)); + +ns = bnet.node_sizes; +ps = parents(bnet.dag, self); +dps = myintersect(ps, bnet.dnodes); +cps = myintersect(ps, bnet.cnodes); +fam_sz = ns([ps self]); + +CPD.self = self; +CPD.sizes = fam_sz; + +% Figure out which (if any) of the parents are discrete, and which cts, and how big they are +% dps = discrete parents, cps = cts parents +CPD.cps = find_equiv_posns(cps, ps); % cts parent index +CPD.dps = find_equiv_posns(dps, ps); +if length(CPD.dps) ~= 1 + error('gmux must have exactly 1 discrete parent') +end +ss = fam_sz(end); +cpsz = fam_sz(CPD.cps(1)); % in gaussian_CPD, cpsz = sum(fam_sz(CPD.cps)) +if ~all(fam_sz(CPD.cps) == cpsz) + error('all cts parents must have same size') +end +dpsz = fam_sz(CPD.dps); +if dpsz ~= length(cps) + error(['the arity of the mux node is ' num2str(dpsz) ... + ' but there are ' num2str(length(cps)) ' cts parents']); +end + +% set default params +%CPD.mean = zeros(ss, 1); +%CPD.cov = eye(ss); +%CPD.weights = randn(ss, cpsz); +CPD.mean = zeros(ss, dpsz); +CPD.cov = 1*repmat(eye(ss), [1 1 dpsz]); +CPD.weights = randn(ss, cpsz, dpsz); + +args = varargin; +nargs = length(args); +for i=1:2:nargs + switch args{i}, + case 'mean', CPD.mean = args{i+1}; + case 'cov', CPD.cov = args{i+1}; + case 'weights', CPD.weights = args{i+1}; + otherwise, + error(['invalid argument name ' args{i}]); + end +end + +%%%%%%%%%%% + +function CPD = init_fields() +% This ensures we define the fields in the same order +% no matter whether we load an object from a file, +% or create it from scratch. (Matlab requires this.) + +CPD.self = []; +CPD.sizes = []; +CPD.cps = []; +CPD.dps = []; +CPD.mean = []; +CPD.cov = []; +CPD.weights = []; + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/sample_node.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@gmux_CPD/sample_node.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,10 @@ +function y = sample_node(CPD, pev) +% SAMPLE_NODE Draw a random sample from P(Xi | x(pi_i), theta_i) (gmux) +% y = sample_node(CPD, parent_evidence) +% +% parent_ev{i} is the value of the i'th parent + +dpval = pev{CPD.dps}; +x = pev{CPD.cps(dpval)}; +y = gsamp(CPD.mean(:,dpval) + CPD.weights(:,:,dpval)*x(:), CPD.cov(:,:,dpval), 1); +y = y(:); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/CPD_to_CPT.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/CPD_to_CPT.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,35 @@ +function CPT = CPD_to_CPT(CPD) +% Compute the big CPT for an HHMM Q node (including F parents) +% by combining internal transprob and startprob +% function CPT = CPD_to_CPT(CPD) + +Qsz = CPD.Qsz; + +if ~isempty(CPD.Fbelow_ndx) + if ~isempty(CPD.Fself_ndx) % general case + error('not implemented') + else % no F from self, hence no startprob (top level) + nps = length(CPD.dom_sz)-1; % num parents + CPT = 0*myones(CPD.dom_sz); + % when Fbelow=1, the CPT(i,j) = delta(i,j) for all k + for k=1:prod(CPD.Qpsizes) + Qps_vals = ind2subv(CPD.Qpsizes, k); + ndx = mk_multi_index(nps+1, [CPD.Fbelow_ndx CPD.Qps_ndx], [1 Qps_vals]); + CPT(ndx{:}) = eye(Qsz); % CPT(:,1,k,:) or CPT(:,k,1,:) etc + end + ndx = mk_multi_index(nps+1, CPD.Fbelow_ndx, 2); + CPT(ndx{:}) = CPD.transprob; % we assume transprob is in topo order + end +else % no F signal from below + if ~isempty(CPD.Fself_ndx) % bottom level + nps = length(CPD.dom_sz)-1; % num parents +
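% In this bottom-level branch there is no Fbelow parent, so the previous value of
% Fself alone selects what is written into the CPT: Fself(t-1)=1 (the level has not
% finished) uses transprob, while Fself(t-1)=2 (it has finished) uses startprob.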
CPT = 0*myones(CPD.dom_sz); + ndx = mk_multi_index(nps+1, CPD.Fself_ndx, 1); + CPT(ndx{:}) = CPD.transprob; + ndx = mk_multi_index(nps+1, CPD.Fself_ndx, 2); + CPT(ndx{:}) = CPD.startprob; + else % no F from self + error('An hhmmQ node without any F parents is just a tabular_CPD') + end +end + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/CVS/Entries --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/CVS/Entries Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,6 @@ +/CPD_to_CPT.m/1.1.1.1/Tue Sep 24 12:46:46 2002// +/hhmm2Q_CPD.m/1.1.1.1/Tue Sep 24 22:34:40 2002// +/maximize_params.m/1.1.1.1/Tue Sep 24 22:44:36 2002// +/reset_ess.m/1.1.1.1/Tue Sep 24 22:36:16 2002// +/update_ess.m/1.1.1.1/Tue Sep 24 22:43:30 2002// +D diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/CVS/Repository --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/CVS/Repository Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +FullBNT/BNT/CPDs/@hhmm2Q_CPD diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/CVS/Root --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/CVS/Root Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/hhmm2Q_CPD.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/hhmm2Q_CPD.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,65 @@ +function CPD = hhmm2Q_CPD(bnet, self, varargin) +% HHMM2Q_CPD Make the CPD for a Q node in a 2 level hierarchical HMM +% CPD = hhmm2Q_CPD(bnet, self, ...) +% +% Fself(t-1) Qps +% \ | +% \ v +% Qold(t-1) -> Q(t) +% / +% / +% Fbelow(t-1) +% +% +% optional args [defaults] +% +% Fself - node number <= ss +% Fbelow - node number <= ss +% Qps - node numbers (all <= 2*ss) - uses 2TBN indexing +% transprob - CPT for when Fbelow=2 and Fself=1 +% startprob - CPT for when Fbelow=2 and Fself=2 +% If Fbelow=1, we cannot change state. + +ss = bnet.nnodes_per_slice; +ns = bnet.node_sizes(:); + +% set default arguments +Fself = []; +Fbelow = []; +Qps = []; +startprob = []; +transprob = []; + +for i=1:2:length(varargin) + switch varargin{i}, + case 'Fself', Fself = varargin{i+1}; + case 'Fbelow', Fbelow = varargin{i+1}; + case 'Qps', Qps = varargin{i+1}; + case 'transprob', transprob = varargin{i+1}; + case 'startprob', startprob = varargin{i+1}; + end +end + +ps = parents(bnet.dag, self); +old_self = self-ss; +ndsz = ns(:)'; +CPD.dom_sz = [ndsz(ps) ns(self)]; +CPD.Fself_ndx = find_equiv_posns(Fself, ps); +CPD.Fbelow_ndx = find_equiv_posns(Fbelow, ps); +Qps = mysetdiff(ps, [Fself Fbelow old_self]); +CPD.Qps_ndx = find_equiv_posns(Qps, ps); +CPD.old_self_ndx = find_equiv_posns(old_self, ps); + +Qps = ps(CPD.Qps_ndx); +CPD.Qsz = ns(self); +CPD.Qpsizes = ns(Qps); + +CPD.transprob = transprob; +CPD.startprob = startprob; +CPD.start_counts = []; +CPD.trans_counts = []; + +CPD = class(CPD, 'hhmm2Q_CPD', discrete_CPD(0, CPD.dom_sz)); + + + diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/maximize_params.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/maximize_params.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,10 @@ +function CPD = maximize_params(CPD, temp) +% MAXIMIZE_PARAMS Set the params of a hhmmQ node to their ML/MAP values.
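% (The counts accumulated by update_ess are simply renormalized with mk_stochastic;
% the temperature argument is not used by this CPD.)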
+% CPD = maximize_params(CPD, temperature) + +if sum(CPD.start_counts(:)) > 0 + CPD.startprob = mk_stochastic(CPD.start_counts); +end +if sum(CPD.trans_counts(:)) > 0 + CPD.transprob = mk_stochastic(CPD.trans_counts); +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/reset_ess.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/reset_ess.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,12 @@ +function CPD = reset_ess(CPD) +% RESET_ESS Reset the Expected Sufficient Statistics of a hhmm2 Q node. +% CPD = reset_ess(CPD) + +domsz = CPD.dom_sz; +domsz(CPD.Fself_ndx) = 1; +domsz(CPD.Fbelow_ndx) = 1; +Qdom_sz = domsz; +Qdom_sz(Qdom_sz==1)=[]; % get rid of dimensions of size 1 + +CPD.start_counts = zeros(Qdom_sz); +CPD.trans_counts = zeros(Qdom_sz); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/update_ess.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmm2Q_CPD/update_ess.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,26 @@ +function CPD = update_ess(CPD, fmarginal, evidence, ns, cnodes, hidden_bitv) + +marg = add_ev_to_dmarginal(fmarginal, evidence, ns); + +nps = length(CPD.dom_sz)-1; % num parents + +if ~isempty(CPD.Fbelow_ndx) + if ~isempty(CPD.Fself_ndx) % general case + ndx = mk_multi_index(nps+1, [CPD.Fbelow_ndx CPD.Fself_ndx], [2 1]); + CPD.trans_counts = CPD.trans_counts + squeeze(marg.T(ndx{:})); + ndx = mk_multi_index(nps+1, [CPD.Fbelow_ndx CPD.Fself_ndx], [2 2]); + CPD.start_counts = CPD.start_counts + squeeze(marg.T(ndx{:})); + else % no F from self, hence no startprob (top level) + ndx = mk_multi_index(nps+1, CPD.Fbelow_ndx, 2); + CPD.trans_counts = CPD.trans_counts + squeeze(marg.T(ndx{:})); + end +else % no F signal from below + if ~isempty(CPD.Fself_ndx) % self F (bottom level) + ndx = mk_multi_index(nps+1, CPD.Fself_ndx, 1); + CPD.trans_counts = CPD.trans_counts + squeeze(marg.T(ndx{:})); + ndx = mk_multi_index(nps+1, CPD.Fself_ndx, 2); + CPD.start_counts = CPD.start_counts + squeeze(marg.T(ndx{:})); + else % no F from self or below + error('no F signal') + end +end diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/CVS/Entries --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/CVS/Entries Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,7 @@ +/hhmmF_CPD.m/1.1.1.1/Mon Jun 24 23:38:24 2002// +/log_prior.m/1.1.1.1/Wed May 29 15:59:54 2002// +/maximize_params.m/1.1.1.1/Wed May 29 15:59:54 2002// +/reset_ess.m/1.1.1.1/Wed May 29 15:59:54 2002// +/update_CPT.m/1.1.1.1/Mon Jun 24 22:45:04 2002// +/update_ess.m/1.1.1.1/Mon Jun 24 23:54:30 2002// +D/Old//// diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/CVS/Repository --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/CVS/Repository Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +FullBNT/BNT/CPDs/@hhmmF_CPD diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/CVS/Root --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/CVS/Root Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/Old/CVS/Entries --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/Old/CVS/Entries Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,7 @@ 
+/hhmmF_CPD.m/1.1.1.1/Mon Jun 24 22:35:06 2002// +/log_prior.m/1.1.1.1/Mon Jun 24 22:35:06 2002// +/maximize_params.m/1.1.1.1/Mon Jun 24 22:35:06 2002// +/reset_ess.m/1.1.1.1/Mon Jun 24 22:35:06 2002// +/update_CPT.m/1.1.1.1/Mon Jun 24 22:35:06 2002// +/update_ess.m/1.1.1.1/Mon Jun 24 22:35:06 2002// +D diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/Old/CVS/Repository --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/Old/CVS/Repository Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +FullBNT/BNT/CPDs/@hhmmF_CPD/Old diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/Old/CVS/Root --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/Old/CVS/Root Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,1 @@ +:ext:nsaunier@bnt.cvs.sourceforge.net:/cvsroot/bnt diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/Old/hhmmF_CPD.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/Old/hhmmF_CPD.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,76 @@ +function CPD = hhmmF_CPD(bnet, self, Qnodes, d, D, varargin) +% HHMMF_CPD Make the CPD for an F node at depth d of a D-level hierarchical HMM +% CPD = hhmmF_CPD(bnet, self, Qnodes, d, D, ...) +% +% Q(d-1) +% \ +% \ +% F(d) +% / | +% / | +% Q(d) F(d+1) +% +% We assume nodes are ordered (numbered) as follows: +% Q(1), ... Q(d), F(d+1), F(d) +% +% F(d)=2 means level d has finished. The prob this happens depends on Q(d) +% and optionally on Q(d-1), Q(d-2), ..., Q(1). +% Also, level d can only finish if the level below has finished +% (hence the F(d+1) -> F(d) arc). +% +% If d=D, there is no F(d+1), so F(d) is just a regular tabular_CPD. +% If all models always finish in the same state (e.g., their last), +% we don't need to condition on the state of parent models (Q(d-1), ...) +% +% optional args [defaults] +% +% termprob - termprob(k,i,2) = prob finishing given Q(d)=i and Q(1:d-1)=k [ finish in last state ] +% +% hhmmF_CPD is a subclass of tabular_CPD so we inherit inference methods like CPD_to_pot, etc. +% +% We create an isolated tabular_CPD with no F parent to learn termprob +% so we can avail of e.g., entropic or Dirichlet priors. +% +% For details, see "Linear-time inference in hierarchical HMMs", Murphy and Paskin, NIPS'01.
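A minimal construction sketch for this interface, assuming a 3-level HHMM whose nodes follow the ordering above; all node numbers, sizes and the termprob layout are illustrative:

    % Qnodes = [1 2 3]; node 4 is F(3); node 5 is F(2), the node being built here (d = 2, D = 3)
    Qsz = 3; Qpsz = 2;                         % |Q(2)| and |Q(1)|
    termprob = zeros(Qpsz, Qsz, 2);
    termprob(:, Qsz, 2) = 0.9;                 % may finish only from the last state
    termprob(:, :, 1) = 1 - termprob(:, :, 2);
    bnet.CPD{5} = hhmmF_CPD(bnet, 5, [1 2 3], 2, 3, 'termprob', termprob);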
+ + +ps = parents(bnet.dag, self); +Qps = myintersect(ps, Qnodes); +F = mysetdiff(ps, Qps); +CPD.Q = Qps(end); % Q(d) +assert(CPD.Q == Qnodes(d)); +CPD.Qps = Qps(1:end-1); % all Q parents except Q(d), i.e., calling context + +ns = bnet.node_sizes(:); +CPD.Qsizes = ns(Qnodes); +CPD.d = d; +CPD.D = D; + +Qsz = ns(CPD.Q); +Qpsz = prod(ns(CPD.Qps)); + +% set default arguments +p = 0.9; +%termprob(k,i,t) Might terminate if i=Qsz; will not terminate if i<Qsz % We sum over the possibilities that F(d+1) = 1 or 2 + +obs_self = ~hidden_bitv(Q); +if obs_self + self_val = evidence{Q}; +end + +if isempty(Qps) % independent of parent context + counts = zeros(Qsz, 2); + %fmarginal.T(Q(d), F(d+1), F(d)) + if obs_self + marg = myreshape(fmarginal.T, [1 2 2]); + counts(self_val,:) = marg(1,2,:); + %counts(self_val,:) = marg(1,1,:) + marg(1,2,:); + else + marg = myreshape(fmarginal.T, [Qsz 2 2]); + counts = squeeze(marg(:,2,:)); + %counts = squeeze(marg(:,2,:)) + squeeze(marg(:,1,:)); + end +else + counts = zeros(Qpsz, Qsz, 2); + %fmarginal.T(Q(1:d-1), Q(d), F(d+1), F(d)) + obs_Qps = ~any(hidden_bitv(Qps)); % we assume that all or none of the Q parents are observed + if obs_Qps + Qps_val = subv2ind(Qpsz, cat(1, evidence{Qps})); + end + if obs_self & obs_Qps + marg = myreshape(fmarginal.T, [1 1 2 2]); + counts(Qps_val, self_val, :) = squeeze(marg(1,1,2,:)); + %counts(Qps_val, self_val, :) = squeeze(marg(1,1,2,:)) + squeeze(marg(1,1,1,:)); + elseif ~obs_self & obs_Qps + marg = myreshape(fmarginal.T, [1 Qsz 2 2]); + counts(Qps_val, :, :) = squeeze(marg(1,:,2,:)); + %counts(Qps_val, :, :) = squeeze(marg(1,:,2,:)) + squeeze(marg(1,:,1,:)); + elseif obs_self & ~obs_Qps + error('not yet implemented') + else + marg = myreshape(fmarginal.T, [Qpsz Qsz 2 2]); + counts(:, :, :) = squeeze(marg(:,:,2,:)); + %counts(:, :, :) = squeeze(marg(:,:,2,:)) + squeeze(marg(:,:,1,:)); + end +end + +CPD.sub_CPD_term = update_ess_simple(CPD.sub_CPD_term, counts); diff -r 000000000000 -r cc4b1211e677 toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/hhmmF_CPD.m --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/toolboxes/FullBNT-1.0.7/bnt/CPDs/@hhmmF_CPD/hhmmF_CPD.m Fri Aug 19 13:07:06 2016 +0200 @@ -0,0 +1,73 @@ +function CPD = hhmmF_CPD(bnet, self, Qself, Fbelow, varargin) +% HHMMF_CPD Make the CPD for an F node in a hierarchical HMM +% CPD = hhmmF_CPD(bnet, self, Qself, Fbelow, ...) +% +% Qps +% \ +% \ +% Fself +% / | +% / | +% Qself Fbelow +% +% We assume nodes are ordered (numbered) as follows: Qps, Q, Fbelow, F +% All node numbers should be from slice 1. +% +% If Fbelow is missing, this becomes a regular tabular_CPD. +% Qps may be omitted. +% +% optional args [defaults] +% +% Qps - node numbers. +% termprob - termprob(k,i,2) = prob finishing given Q(d)=i and Q(1:d-1)=k [ finish in last state wp 0.9] +% +% hhmmF_CPD is a subclass of tabular_CPD so we inherit inference methods like CPD_to_pot, etc. +% +% We create an isolated tabular_CPD with no F parent to learn termprob +% so we can avail of e.g., entropic or Dirichlet priors. +% +% For details, see "Linear-time inference in hierarchical HMMs", Murphy and Paskin, NIPS'01.
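A corresponding sketch for this slice-1 interface; the node numbers, sizes and termprob layout are again assumptions for illustration:

    % node 1 = Qps (calling context), node 2 = Qself, node 3 = Fbelow, node 4 = this F node
    Qsz = 3; Qpsz = 2;
    termprob = zeros(Qpsz, Qsz, 2);
    termprob(:, Qsz, 2) = 0.9;                 % finish from the last state with prob 0.9
    termprob(:, :, 1) = 1 - termprob(:, :, 2);
    bnet.CPD{4} = hhmmF_CPD(bnet, 4, 2, 3, 'Qps', 1, 'termprob', termprob);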
+ + + +Qps = []; +% get parents +for i=1:2:length(varargin) + switch varargin{i}, + case 'Qps', Qps = varargin{i+1}; + end +end + +ns = bnet.node_sizes(:); +Qsz = ns(Qself); +Qpsz = prod(ns(Qps)); +CPD.Qsz = Qsz; +CPD.Qpsz = Qpsz; + +ps = parents(bnet.dag, self); +CPD.Fbelow_ndx = find_equiv_posns(Fbelow, ps); +CPD.Qps_ndx = find_equiv_posns(Qps, ps); +CPD.Qself_ndx = find_equiv_posns(Qself, ps); + +% set default arguments +p = 0.9; +%termprob(k,i,t) Might terminate if i=Qsz; will not terminate if i<Qsz