{"id":"https://openalex.org/W3015427680","doi":"https://doi.org/10.1109/icassp40776.2020.9054441","title":"Attention Driven Fusion for Multi-Modal Emotion Recognition","display_name":"Attention Driven Fusion for Multi-Modal Emotion Recognition","publication_year":2020,"publication_date":"2020-04-09","ids":{"openalex":"https://openalex.org/W3015427680","doi":"https://doi.org/10.1109/icassp40776.2020.9054441","mag":"3015427680"},"language":"en","primary_location":{"id":"doi:10.1109/icassp40776.2020.9054441","is_oa":false,"landing_page_url":"https://doi.org/10.1109/icassp40776.2020.9054441","pdf_url":null,"source":null,"license":null,"license_id":null,"version":"publishedVersion","is_accepted":true,"is_published":true,"raw_source_name":"ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","raw_type":"proceedings-article"},"type":"article","indexed_in":["crossref"],"open_access":{"is_oa":true,"oa_status":"green","oa_url":"https://eprints.qut.edu.au/202004/1/ATTENTION_DRIVEN_FUSION_FOR_MULTI_MODAL_EMOTION_RECOGNITION.pdf","any_repository_has_fulltext":true},"authorships":[{"author_position":"first","author":{"id":"https://openalex.org/A5003739758","display_name":"Darshana Priyasad","orcid":"https://orcid.org/0000-0001-8431-4194"},"institutions":[{"id":"https://openalex.org/I160993911","display_name":"Queensland University of Technology","ror":"https://ror.org/03pnv4752","country_code":"AU","type":"education","lineage":["https://openalex.org/I160993911"]}],"countries":["AU"],"is_corresponding":true,"raw_author_name":"Darshana Priyasad","raw_affiliation_strings":["Speech and Audio Research Lab, SAIVT Queensland University of Technology, Brisbane, Australia"],"affiliations":[{"raw_affiliation_string":"Speech and Audio Research Lab, SAIVT Queensland University of Technology, Brisbane, Australia","institution_ids":["https://openalex.org/I160993911"]}]},{"author_position":"middle","author":{"id":"https://openalex.org/A5000736425","display_name":"Tharindu Fernando","orcid":"https://orcid.org/0000-0002-6935-1816"},"institutions":[{"id":"https://openalex.org/I160993911","display_name":"Queensland University of Technology","ror":"https://ror.org/03pnv4752","country_code":"AU","type":"education","lineage":["https://openalex.org/I160993911"]}],"countries":["AU"],"is_corresponding":false,"raw_author_name":"Tharindu Fernando","raw_affiliation_strings":["Speech and Audio Research Lab, SAIVT Queensland University of Technology, Brisbane, Australia"],"affiliations":[{"raw_affiliation_string":"Speech and Audio Research Lab, SAIVT Queensland University of Technology, Brisbane, Australia","institution_ids":["https://openalex.org/I160993911"]}]},{"author_position":"middle","author":{"id":"https://openalex.org/A5083626840","display_name":"Simon Denman","orcid":"https://orcid.org/0000-0002-0983-5480"},"institutions":[{"id":"https://openalex.org/I160993911","display_name":"Queensland University of Technology","ror":"https://ror.org/03pnv4752","country_code":"AU","type":"education","lineage":["https://openalex.org/I160993911"]}],"countries":["AU"],"is_corresponding":false,"raw_author_name":"Simon Denman","raw_affiliation_strings":["Speech and Audio Research Lab, SAIVT Queensland University of Technology, Brisbane, Australia"],"affiliations":[{"raw_affiliation_string":"Speech and Audio Research Lab, SAIVT Queensland University of Technology, Brisbane, 
Australia","institution_ids":["https://openalex.org/I160993911"]}]},{"author_position":"middle","author":{"id":"https://openalex.org/A5055128383","display_name":"Sridha Sridharan","orcid":"https://orcid.org/0000-0003-4316-9001"},"institutions":[{"id":"https://openalex.org/I160993911","display_name":"Queensland University of Technology","ror":"https://ror.org/03pnv4752","country_code":"AU","type":"education","lineage":["https://openalex.org/I160993911"]}],"countries":["AU"],"is_corresponding":false,"raw_author_name":"Sridha Sridharan","raw_affiliation_strings":["Speech and Audio Research Lab, SAIVT Queensland University of Technology, Brisbane, Australia"],"affiliations":[{"raw_affiliation_string":"Speech and Audio Research Lab, SAIVT Queensland University of Technology, Brisbane, Australia","institution_ids":["https://openalex.org/I160993911"]}]},{"author_position":"last","author":{"id":"https://openalex.org/A5034095159","display_name":"Clinton Fookes","orcid":"https://orcid.org/0000-0002-8515-6324"},"institutions":[{"id":"https://openalex.org/I160993911","display_name":"Queensland University of Technology","ror":"https://ror.org/03pnv4752","country_code":"AU","type":"education","lineage":["https://openalex.org/I160993911"]}],"countries":["AU"],"is_corresponding":false,"raw_author_name":"Clinton Fookes","raw_affiliation_strings":["Speech and Audio Research Lab, SAIVT Queensland University of Technology, Brisbane, Australia"],"affiliations":[{"raw_affiliation_string":"Speech and Audio Research Lab, SAIVT Queensland University of Technology, Brisbane, Australia","institution_ids":["https://openalex.org/I160993911"]}]}],"institutions":[],"countries_distinct_count":1,"institutions_distinct_count":5,"corresponding_author_ids":["https://openalex.org/A5003739758"],"corresponding_institution_ids":["https://openalex.org/I160993911"],"apc_list":null,"apc_paid":null,"fwci":8.532,"has_fulltext":true,"cited_by_count":76,"citation_normalized_percentile":{"value":0.98075907,"is_in_top_1_percent":false,"is_in_top_10_percent":true},"cited_by_percentile_year":{"min":94,"max":100},"biblio":{"volume":null,"issue":null,"first_page":"3227","last_page":"3231"},"is_retracted":false,"is_paratext":false,"is_xpac":false,"primary_topic":{"id":"https://openalex.org/T10667","display_name":"Emotion and Mood Recognition","score":0.9998000264167786,"subfield":{"id":"https://openalex.org/subfields/3205","display_name":"Experimental and Cognitive Psychology"},"field":{"id":"https://openalex.org/fields/32","display_name":"Psychology"},"domain":{"id":"https://openalex.org/domains/2","display_name":"Social Sciences"}},"topics":[{"id":"https://openalex.org/T10667","display_name":"Emotion and Mood Recognition","score":0.9998000264167786,"subfield":{"id":"https://openalex.org/subfields/3205","display_name":"Experimental and Cognitive Psychology"},"field":{"id":"https://openalex.org/fields/32","display_name":"Psychology"},"domain":{"id":"https://openalex.org/domains/2","display_name":"Social Sciences"}},{"id":"https://openalex.org/T11309","display_name":"Music and Audio Processing","score":0.9988999962806702,"subfield":{"id":"https://openalex.org/subfields/1711","display_name":"Signal Processing"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}},{"id":"https://openalex.org/T10860","display_name":"Speech and Audio 
Processing","score":0.9983000159263611,"subfield":{"id":"https://openalex.org/subfields/1711","display_name":"Signal Processing"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}}],"keywords":[{"id":"https://openalex.org/keywords/computer-science","display_name":"Computer science","score":0.7900335788726807},{"id":"https://openalex.org/keywords/recurrent-neural-network","display_name":"Recurrent neural network","score":0.625923216342926},{"id":"https://openalex.org/keywords/artificial-intelligence","display_name":"Artificial intelligence","score":0.6065009832382202},{"id":"https://openalex.org/keywords/speech-recognition","display_name":"Speech recognition","score":0.5644984841346741},{"id":"https://openalex.org/keywords/deep-learning","display_name":"Deep learning","score":0.5396421551704407},{"id":"https://openalex.org/keywords/fuse","display_name":"Fuse (electrical)","score":0.47406166791915894},{"id":"https://openalex.org/keywords/emotion-recognition","display_name":"Emotion recognition","score":0.47205716371536255},{"id":"https://openalex.org/keywords/convolutional-neural-network","display_name":"Convolutional neural network","score":0.4650726318359375},{"id":"https://openalex.org/keywords/pattern-recognition","display_name":"Pattern recognition (psychology)","score":0.4607784152030945},{"id":"https://openalex.org/keywords/filter","display_name":"Filter (signal processing)","score":0.42713767290115356},{"id":"https://openalex.org/keywords/artificial-neural-network","display_name":"Artificial neural network","score":0.3182513117790222}],"concepts":[{"id":"https://openalex.org/C41008148","wikidata":"https://www.wikidata.org/wiki/Q21198","display_name":"Computer science","level":0,"score":0.7900335788726807},{"id":"https://openalex.org/C147168706","wikidata":"https://www.wikidata.org/wiki/Q1457734","display_name":"Recurrent neural network","level":3,"score":0.625923216342926},{"id":"https://openalex.org/C154945302","wikidata":"https://www.wikidata.org/wiki/Q11660","display_name":"Artificial intelligence","level":1,"score":0.6065009832382202},{"id":"https://openalex.org/C28490314","wikidata":"https://www.wikidata.org/wiki/Q189436","display_name":"Speech recognition","level":1,"score":0.5644984841346741},{"id":"https://openalex.org/C108583219","wikidata":"https://www.wikidata.org/wiki/Q197536","display_name":"Deep learning","level":2,"score":0.5396421551704407},{"id":"https://openalex.org/C141353440","wikidata":"https://www.wikidata.org/wiki/Q182221","display_name":"Fuse (electrical)","level":2,"score":0.47406166791915894},{"id":"https://openalex.org/C2777438025","wikidata":"https://www.wikidata.org/wiki/Q1339090","display_name":"Emotion recognition","level":2,"score":0.47205716371536255},{"id":"https://openalex.org/C81363708","wikidata":"https://www.wikidata.org/wiki/Q17084460","display_name":"Convolutional neural network","level":2,"score":0.4650726318359375},{"id":"https://openalex.org/C153180895","wikidata":"https://www.wikidata.org/wiki/Q7148389","display_name":"Pattern recognition (psychology)","level":2,"score":0.4607784152030945},{"id":"https://openalex.org/C106131492","wikidata":"https://www.wikidata.org/wiki/Q3072260","display_name":"Filter (signal processing)","level":2,"score":0.42713767290115356},{"id":"https://openalex.org/C50644808","wikidata":"https://www.wikidata.org/wiki/Q192776","display_name":"Artificial neural 
network","level":2,"score":0.3182513117790222},{"id":"https://openalex.org/C119599485","wikidata":"https://www.wikidata.org/wiki/Q43035","display_name":"Electrical engineering","level":1,"score":0.0},{"id":"https://openalex.org/C127413603","wikidata":"https://www.wikidata.org/wiki/Q11023","display_name":"Engineering","level":0,"score":0.0},{"id":"https://openalex.org/C31972630","wikidata":"https://www.wikidata.org/wiki/Q844240","display_name":"Computer vision","level":1,"score":0.0}],"mesh":[],"locations_count":2,"locations":[{"id":"doi:10.1109/icassp40776.2020.9054441","is_oa":false,"landing_page_url":"https://doi.org/10.1109/icassp40776.2020.9054441","pdf_url":null,"source":null,"license":null,"license_id":null,"version":"publishedVersion","is_accepted":true,"is_published":true,"raw_source_name":"ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","raw_type":"proceedings-article"},{"id":"pmh:oai:eprints.qut.edu.au:202004","is_oa":true,"landing_page_url":null,"pdf_url":"https://eprints.qut.edu.au/202004/1/ATTENTION_DRIVEN_FUSION_FOR_MULTI_MODAL_EMOTION_RECOGNITION.pdf","source":{"id":"https://openalex.org/S4306402607","display_name":"QUT ePrints (Queensland University of Technology)","issn_l":null,"issn":null,"is_oa":false,"is_in_doaj":false,"is_core":false,"host_organization":"https://openalex.org/I160993911","host_organization_name":"Queensland University of Technology","host_organization_lineage":["https://openalex.org/I160993911"],"host_organization_lineage_names":[],"type":"repository"},"license":null,"license_id":null,"version":"submittedVersion","is_accepted":false,"is_published":false,"raw_source_name":"Proceedings of ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","raw_type":"Chapter in Book, Report or Conference volume"}],"best_oa_location":{"id":"pmh:oai:eprints.qut.edu.au:202004","is_oa":true,"landing_page_url":null,"pdf_url":"https://eprints.qut.edu.au/202004/1/ATTENTION_DRIVEN_FUSION_FOR_MULTI_MODAL_EMOTION_RECOGNITION.pdf","source":{"id":"https://openalex.org/S4306402607","display_name":"QUT ePrints (Queensland University of Technology)","issn_l":null,"issn":null,"is_oa":false,"is_in_doaj":false,"is_core":false,"host_organization":"https://openalex.org/I160993911","host_organization_name":"Queensland University of Technology","host_organization_lineage":["https://openalex.org/I160993911"],"host_organization_lineage_names":[],"type":"repository"},"license":null,"license_id":null,"version":"submittedVersion","is_accepted":false,"is_published":false,"raw_source_name":"Proceedings of ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","raw_type":"Chapter in Book, Report or Conference volume"},"sustainable_development_goals":[{"id":"https://metadata.un.org/sdg/4","display_name":"Quality 
Education","score":0.44999998807907104}],"awards":[],"funders":[],"has_content":{"grobid_xml":true,"pdf":true},"content_urls":{"pdf":"https://content.openalex.org/works/W3015427680.pdf","grobid_xml":"https://content.openalex.org/works/W3015427680.grobid-xml"},"referenced_works_count":26,"referenced_works":["https://openalex.org/W1832693441","https://openalex.org/W2074788634","https://openalex.org/W2146334809","https://openalex.org/W2250539671","https://openalex.org/W2625297138","https://openalex.org/W2770845466","https://openalex.org/W2797947982","https://openalex.org/W2883430806","https://openalex.org/W2885005742","https://openalex.org/W2889169802","https://openalex.org/W2889191349","https://openalex.org/W2892071465","https://openalex.org/W2912728762","https://openalex.org/W2937584914","https://openalex.org/W2951442257","https://openalex.org/W2962770129","https://openalex.org/W2963785564","https://openalex.org/W2963800675","https://openalex.org/W2964052309","https://openalex.org/W2972495317","https://openalex.org/W2972498864","https://openalex.org/W3008736792","https://openalex.org/W3038653086","https://openalex.org/W4299280181","https://openalex.org/W6750449527","https://openalex.org/W6751883949"],"related_works":["https://openalex.org/W3000097931","https://openalex.org/W2354322770","https://openalex.org/W4237547500","https://openalex.org/W4226493464","https://openalex.org/W4312417841","https://openalex.org/W3193565141","https://openalex.org/W3133861977","https://openalex.org/W3008584592","https://openalex.org/W3167935049","https://openalex.org/W3029198973"],"abstract_inverted_index":{"Deep":[0,32],"learning":[1],"has":[2],"emerged":[3],"as":[4],"a":[5,54,71,91,127,132],"powerful":[6],"alternative":[7],"to":[8,58,82,108,141],"hand-crafted":[9],"methods":[10],"for":[11,66,99],"emotion":[12,23,67,100],"recognition":[13,101],"on":[14,75,147,165],"combined":[15],"acoustic":[16,28,64,84],"and":[17,27,37,47,60,63,102,126],"text":[18,26,62,118],"modalities.":[19],"Baseline":[20],"systems":[21],"model":[22],"information":[24],"in":[25,134,182],"modes":[29],"independently":[30],"using":[31],"Convolutional":[33],"Neural":[34,39],"Networks":[35,40],"(DCNN)":[36],"Recurrent":[38],"(RNN),":[41],"followed":[42,89,130],"by":[43,90,131],"applying":[44,110],"attention,":[45],"fusion,":[46],"classification.":[48,68],"In":[49],"this":[50],"paper,":[51],"we":[52,120,157],"present":[53],"deep":[55],"learning-based":[56],"approach":[57,94],"exploit":[59],"fuse":[61],"data":[65],"We":[69],"utilize":[70],"SincNet":[72],"layer,":[73],"based":[74],"parameterized":[76],"sinc":[77],"functions":[78],"with":[79],"band-pass":[80],"filters,":[81],"extract":[83],"features":[85,106],"from":[86,151],"raw":[87,114],"audio":[88],"DCNN.":[92],"This":[93],"learns":[95],"filter":[96],"banks":[97],"tuned":[98],"provides":[103],"more":[104],"effective":[105],"compared":[107],"directly":[109],"convolutions":[111],"over":[112],"the":[113,143,152,159,162,166,173],"speech":[115],"signal.":[116],"For":[117],"processing,":[119],"use":[121],"two":[122],"branches":[123],"(a":[124],"DCNN":[125],"Bi-direction":[128],"RNN":[129],"DCNN)":[133],"parallel":[135],"where":[136],"cross":[137],"attention":[138],"is":[139],"introduced":[140],"infer":[142],"N-gram":[144],"level":[145],"correlations":[146],"hidden":[148],"representations":[149],"received":[150],"Bi-RNN.":[153],"Following":[154],"existing":[155,177],"state-of-the-art,":[156],"evaluate":[158],"performance":[160],"of":[161],"proposed":[163,174],"system":[164,175],"IEMOCAP":[167],"datase
t.":[168],"Experimental":[169],"results":[170],"indicate":[171],"that":[172],"outperforms":[176],"methods,":[178],"achieving":[179],"5.2%":[180],"improvement":[181],"weighted":[183],"accuracy.":[184]},"counts_by_year":[{"year":2026,"cited_by_count":1},{"year":2025,"cited_by_count":14},{"year":2024,"cited_by_count":13},{"year":2023,"cited_by_count":22},{"year":2022,"cited_by_count":12},{"year":2021,"cited_by_count":12},{"year":2020,"cited_by_count":2}],"updated_date":"2026-03-13T16:22:10.518609","created_date":"2025-10-10T00:00:00"}
