{"id":"https://openalex.org/W2409534643","doi":"https://doi.org/10.1145/2911996.2912051","title":"Multimodal Deep Convolutional Neural Network for Audio-Visual Emotion Recognition","display_name":"Multimodal Deep Convolutional Neural Network for Audio-Visual Emotion Recognition","publication_year":2016,"publication_date":"2016-06-06","ids":{"openalex":"https://openalex.org/W2409534643","doi":"https://doi.org/10.1145/2911996.2912051","mag":"2409534643"},"language":"en","primary_location":{"id":"doi:10.1145/2911996.2912051","is_oa":false,"landing_page_url":"https://doi.org/10.1145/2911996.2912051","pdf_url":null,"source":null,"license":null,"license_id":null,"version":"publishedVersion","is_accepted":true,"is_published":true,"raw_source_name":"Proceedings of the 2016 ACM on International Conference on Multimedia Retrieval","raw_type":"proceedings-article"},"type":"article","indexed_in":["crossref"],"open_access":{"is_oa":false,"oa_status":"closed","oa_url":null,"any_repository_has_fulltext":false},"authorships":[{"author_position":"first","author":{"id":"https://openalex.org/A5101891025","display_name":"Shiqing Zhang","orcid":"https://orcid.org/0000-0001-8184-5088"},"institutions":[{"id":"https://openalex.org/I20231570","display_name":"Peking University","ror":"https://ror.org/02v51f717","country_code":"CN","type":"education","lineage":["https://openalex.org/I20231570"]}],"countries":["CN"],"is_corresponding":true,"raw_author_name":"Shiqing Zhang","raw_affiliation_strings":["Peking University, Beijing, China"],"affiliations":[{"raw_affiliation_string":"Peking University, Beijing, China","institution_ids":["https://openalex.org/I20231570"]}]},{"author_position":"middle","author":{"id":"https://openalex.org/A5055433405","display_name":"Shiliang Zhang","orcid":"https://orcid.org/0000-0001-9053-9314"},"institutions":[{"id":"https://openalex.org/I20231570","display_name":"Peking University","ror":"https://ror.org/02v51f717","country_code":"CN","type":"education","lineage":["https://openalex.org/I20231570"]}],"countries":["CN"],"is_corresponding":false,"raw_author_name":"Shiliang Zhang","raw_affiliation_strings":["Peking University, Beijing, China"],"affiliations":[{"raw_affiliation_string":"Peking University, Beijing, China","institution_ids":["https://openalex.org/I20231570"]}]},{"author_position":"middle","author":{"id":"https://openalex.org/A5058066577","display_name":"Tiejun Huang","orcid":"https://orcid.org/0000-0002-4234-6099"},"institutions":[{"id":"https://openalex.org/I20231570","display_name":"Peking University","ror":"https://ror.org/02v51f717","country_code":"CN","type":"education","lineage":["https://openalex.org/I20231570"]}],"countries":["CN"],"is_corresponding":false,"raw_author_name":"Tiejun Huang","raw_affiliation_strings":["Peking University, Beijing, China"],"affiliations":[{"raw_affiliation_string":"Peking University, Beijing, China","institution_ids":["https://openalex.org/I20231570"]}]},{"author_position":"last","author":{"id":"https://openalex.org/A5018478553","display_name":"Wen Gao","orcid":"https://orcid.org/0000-0002-8070-802X"},"institutions":[{"id":"https://openalex.org/I20231570","display_name":"Peking University","ror":"https://ror.org/02v51f717","country_code":"CN","type":"education","lineage":["https://openalex.org/I20231570"]}],"countries":["CN"],"is_corresponding":false,"raw_author_name":"Wen Gao","raw_affiliation_strings":["Peking University, Beijing, China"],"affiliations":[{"raw_affiliation_string":"Peking University, Beijing, 
China","institution_ids":["https://openalex.org/I20231570"]}]}],"institutions":[],"countries_distinct_count":1,"institutions_distinct_count":4,"corresponding_author_ids":["https://openalex.org/A5101891025"],"corresponding_institution_ids":["https://openalex.org/I20231570"],"apc_list":null,"apc_paid":null,"fwci":9.979,"has_fulltext":false,"cited_by_count":100,"citation_normalized_percentile":{"value":0.98330748,"is_in_top_1_percent":false,"is_in_top_10_percent":true},"cited_by_percentile_year":{"min":97,"max":99},"biblio":{"volume":null,"issue":null,"first_page":"281","last_page":"284"},"is_retracted":false,"is_paratext":false,"is_xpac":false,"primary_topic":{"id":"https://openalex.org/T10667","display_name":"Emotion and Mood Recognition","score":0.9987000226974487,"subfield":{"id":"https://openalex.org/subfields/3205","display_name":"Experimental and Cognitive Psychology"},"field":{"id":"https://openalex.org/fields/32","display_name":"Psychology"},"domain":{"id":"https://openalex.org/domains/2","display_name":"Social Sciences"}},"topics":[{"id":"https://openalex.org/T10667","display_name":"Emotion and Mood Recognition","score":0.9987000226974487,"subfield":{"id":"https://openalex.org/subfields/3205","display_name":"Experimental and Cognitive Psychology"},"field":{"id":"https://openalex.org/fields/32","display_name":"Psychology"},"domain":{"id":"https://openalex.org/domains/2","display_name":"Social Sciences"}},{"id":"https://openalex.org/T10860","display_name":"Speech and Audio Processing","score":0.9976000189781189,"subfield":{"id":"https://openalex.org/subfields/1711","display_name":"Signal Processing"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}},{"id":"https://openalex.org/T12032","display_name":"Multisensory perception and integration","score":0.9933000206947327,"subfield":{"id":"https://openalex.org/subfields/3205","display_name":"Experimental and Cognitive Psychology"},"field":{"id":"https://openalex.org/fields/32","display_name":"Psychology"},"domain":{"id":"https://openalex.org/domains/2","display_name":"Social Sciences"}}],"keywords":[{"id":"https://openalex.org/keywords/computer-science","display_name":"Computer science","score":0.7933715581893921},{"id":"https://openalex.org/keywords/convolutional-neural-network","display_name":"Convolutional neural network","score":0.6921459436416626},{"id":"https://openalex.org/keywords/semantic-gap","display_name":"Semantic gap","score":0.6711797118186951},{"id":"https://openalex.org/keywords/artificial-intelligence","display_name":"Artificial intelligence","score":0.586513340473175},{"id":"https://openalex.org/keywords/deep-learning","display_name":"Deep learning","score":0.5472506880760193},{"id":"https://openalex.org/keywords/speech-recognition","display_name":"Speech recognition","score":0.5365832448005676},{"id":"https://openalex.org/keywords/audio-visual","display_name":"Audio visual","score":0.5104979276657104},{"id":"https://openalex.org/keywords/feature-extraction","display_name":"Feature extraction","score":0.44439876079559326},{"id":"https://openalex.org/keywords/emotion-recognition","display_name":"Emotion recognition","score":0.4255087375640869},{"id":"https://openalex.org/keywords/pattern-recognition","display_name":"Pattern recognition (psychology)","score":0.372908353805542},{"id":"https://openalex.org/keywords/image","display_name":"Image 
(mathematics)","score":0.17235857248306274},{"id":"https://openalex.org/keywords/multimedia","display_name":"Multimedia","score":0.06722015142440796},{"id":"https://openalex.org/keywords/image-retrieval","display_name":"Image retrieval","score":0.06378629803657532}],"concepts":[{"id":"https://openalex.org/C41008148","wikidata":"https://www.wikidata.org/wiki/Q21198","display_name":"Computer science","level":0,"score":0.7933715581893921},{"id":"https://openalex.org/C81363708","wikidata":"https://www.wikidata.org/wiki/Q17084460","display_name":"Convolutional neural network","level":2,"score":0.6921459436416626},{"id":"https://openalex.org/C86034646","wikidata":"https://www.wikidata.org/wiki/Q474311","display_name":"Semantic gap","level":4,"score":0.6711797118186951},{"id":"https://openalex.org/C154945302","wikidata":"https://www.wikidata.org/wiki/Q11660","display_name":"Artificial intelligence","level":1,"score":0.586513340473175},{"id":"https://openalex.org/C108583219","wikidata":"https://www.wikidata.org/wiki/Q197536","display_name":"Deep learning","level":2,"score":0.5472506880760193},{"id":"https://openalex.org/C28490314","wikidata":"https://www.wikidata.org/wiki/Q189436","display_name":"Speech recognition","level":1,"score":0.5365832448005676},{"id":"https://openalex.org/C3017588708","wikidata":"https://www.wikidata.org/wiki/Q758901","display_name":"Audio visual","level":2,"score":0.5104979276657104},{"id":"https://openalex.org/C52622490","wikidata":"https://www.wikidata.org/wiki/Q1026626","display_name":"Feature extraction","level":2,"score":0.44439876079559326},{"id":"https://openalex.org/C2777438025","wikidata":"https://www.wikidata.org/wiki/Q1339090","display_name":"Emotion recognition","level":2,"score":0.4255087375640869},{"id":"https://openalex.org/C153180895","wikidata":"https://www.wikidata.org/wiki/Q7148389","display_name":"Pattern recognition (psychology)","level":2,"score":0.372908353805542},{"id":"https://openalex.org/C115961682","wikidata":"https://www.wikidata.org/wiki/Q860623","display_name":"Image (mathematics)","level":2,"score":0.17235857248306274},{"id":"https://openalex.org/C49774154","wikidata":"https://www.wikidata.org/wiki/Q131765","display_name":"Multimedia","level":1,"score":0.06722015142440796},{"id":"https://openalex.org/C1667742","wikidata":"https://www.wikidata.org/wiki/Q10927554","display_name":"Image retrieval","level":3,"score":0.06378629803657532}],"mesh":[],"locations_count":1,"locations":[{"id":"doi:10.1145/2911996.2912051","is_oa":false,"landing_page_url":"https://doi.org/10.1145/2911996.2912051","pdf_url":null,"source":null,"license":null,"license_id":null,"version":"publishedVersion","is_accepted":true,"is_published":true,"raw_source_name":"Proceedings of the 2016 ACM on International Conference on Multimedia 
Retrieval","raw_type":"proceedings-article"}],"best_oa_location":null,"sustainable_development_goals":[],"awards":[],"funders":[],"has_content":{"grobid_xml":false,"pdf":false},"content_urls":null,"referenced_works_count":16,"referenced_works":["https://openalex.org/W147964346","https://openalex.org/W2009059481","https://openalex.org/W2020676607","https://openalex.org/W2056907797","https://openalex.org/W2071249869","https://openalex.org/W2071392655","https://openalex.org/W2100495367","https://openalex.org/W2114025269","https://openalex.org/W2123099218","https://openalex.org/W2145310492","https://openalex.org/W2156503193","https://openalex.org/W2157297238","https://openalex.org/W2158874389","https://openalex.org/W2163605009","https://openalex.org/W2168854967","https://openalex.org/W3141819983"],"related_works":["https://openalex.org/W2271369634","https://openalex.org/W2350550760","https://openalex.org/W578794879","https://openalex.org/W2074916782","https://openalex.org/W2625296515","https://openalex.org/W3137890128","https://openalex.org/W4245955731","https://openalex.org/W2393726419","https://openalex.org/W2330333072","https://openalex.org/W2380912101"],"abstract_inverted_index":{"Emotion":[0],"recognition":[1,85],"is":[2,63,119,153],"a":[3,42,57,106,111,123],"challenging":[4],"task":[5],"because":[6],"of":[7,24,99,113,142,149],"the":[8,15,21,29,37,51,89,97,134,139,143,147],"emotional":[9,38],"gap":[10,39],"between":[11],"subjective":[12],"emotion":[13,84,129,165],"and":[14,53,82,93,159],"low-level":[16],"audio-visual":[17,125,136],"features.":[18],"Inspired":[19],"by":[20,110],"recent":[22],"success":[23,168],"deep":[25,58],"learning":[26],"in":[27,56,105,162,172],"bridging":[28],"semantic":[30],"gap,":[31],"this":[32,152,173],"paper":[33],"proposes":[34],"to":[35,79,121],"bridge":[36],"based":[40],"on":[41,73,88,133],"multimodal":[43,61],"Deep":[44],"Convolution":[45],"Neural":[46],"Network":[47],"(DCNN),":[48],"which":[49],"fuses":[50],"audio":[52,81,158],"visual":[54,83,160],"cues":[55,161],"model.":[59],"This":[60],"DCNN":[62,70,163],"trained":[64,120],"with":[65],"two":[66,69,101],"stages.":[67],"First,":[68],"models":[71],"pre-trained":[72],"large-scale":[74],"image":[75],"data":[76],"are":[77,103],"fine-tuned":[78],"perform":[80],"tasks":[86],"respectively":[87],"corresponding":[90],"labeled":[91],"speech":[92],"face":[94],"data.":[95],"Second,":[96],"outputs":[98],"these":[100],"DCNNs":[102],"integrated":[104],"fusion":[107,117],"network":[108,118],"constructed":[109],"number":[112],"fully-connected":[114],"layers.":[115],"The":[116],"obtain":[122],"joint":[124],"feature":[126],"representation":[127],"for":[128,164],"recognition.":[130,166],"Experimental":[131],"results":[132],"RML":[135],"database":[137],"demonstrates":[138],"promising":[140],"performance":[141],"proposed":[144],"method.":[145],"To":[146],"best":[148],"our":[150],"knowledge,":[151],"an":[154],"early":[155],"work":[156],"fusing":[157],"Its":[167],"guarantees":[169],"further":[170],"research":[171],"direction.":[174]},"counts_by_year":[{"year":2026,"cited_by_count":1},{"year":2025,"cited_by_count":8},{"year":2024,"cited_by_count":6},{"year":2023,"cited_by_count":12},{"year":2022,"cited_by_count":14},{"year":2021,"cited_by_count":11},{"year":2020,"cited_by_count":15},{"year":2019,"cited_by_count":13},{"year":2018,"cited_by_count":10},{"year":2017,"cited_by_count":10}],"updated_date":"2025-11-06T03:46:38.306776","created_date":"2025-10-10T00:00:00"}
