{"id":"https://openalex.org/W4385822670","doi":"https://doi.org/10.21437/interspeech.2023-1163","title":"Speech Emotion Recognition by Estimating Emotional Label Sequences with Phoneme Class Attribute","display_name":"Speech Emotion Recognition by Estimating Emotional Label Sequences with Phoneme Class Attribute","publication_year":2023,"publication_date":"2023-08-14","ids":{"openalex":"https://openalex.org/W4385822670","doi":"https://doi.org/10.21437/interspeech.2023-1163"},"language":"en","primary_location":{"id":"doi:10.21437/interspeech.2023-1163","is_oa":false,"landing_page_url":"http://dx.doi.org/10.21437/interspeech.2023-1163","pdf_url":null,"source":null,"license":null,"license_id":null,"version":"publishedVersion","is_accepted":true,"is_published":true,"raw_source_name":"INTERSPEECH 2023","raw_type":"proceedings-article"},"type":"article","indexed_in":["crossref"],"open_access":{"is_oa":false,"oa_status":"closed","oa_url":null,"any_repository_has_fulltext":false},"authorships":[{"author_position":"first","author":{"id":"https://openalex.org/A5001641384","display_name":"Ryotaro Nagase","orcid":null},"institutions":[],"countries":[],"is_corresponding":true,"raw_author_name":"Ryotaro Nagase","raw_affiliation_strings":[],"affiliations":[]},{"author_position":"middle","author":{"id":"https://openalex.org/A5051293207","display_name":"Takahiro Fukumori","orcid":"https://orcid.org/0000-0002-4317-9704"},"institutions":[],"countries":[],"is_corresponding":false,"raw_author_name":"Takahiro Fukumori","raw_affiliation_strings":[],"affiliations":[]},{"author_position":"last","author":{"id":"https://openalex.org/A5079911577","display_name":"Yoichi Yamashita","orcid":null},"institutions":[],"countries":[],"is_corresponding":false,"raw_author_name":"Yoichi Yamashita","raw_affiliation_strings":[],"affiliations":[]}],"institutions":[],"countries_distinct_count":0,"institutions_distinct_count":3,"corresponding_author_ids":["https://openalex.org/A5001641384"],"corresponding_institution_ids":[],"apc_list":null,"apc_paid":null,"fwci":0.0,"has_fulltext":false,"cited_by_count":0,"citation_normalized_percentile":{"value":0.12909263,"is_in_top_1_percent":false,"is_in_top_10_percent":false},"cited_by_percentile_year":null,"biblio":{"volume":null,"issue":null,"first_page":"4533","last_page":"4537"},"is_retracted":false,"is_paratext":false,"is_xpac":false,"primary_topic":{"id":"https://openalex.org/T10667","display_name":"Emotion and Mood Recognition","score":0.6948000192642212,"subfield":{"id":"https://openalex.org/subfields/3205","display_name":"Experimental and Cognitive Psychology"},"field":{"id":"https://openalex.org/fields/32","display_name":"Psychology"},"domain":{"id":"https://openalex.org/domains/2","display_name":"Social Sciences"}},"topics":[{"id":"https://openalex.org/T10667","display_name":"Emotion and Mood Recognition","score":0.6948000192642212,"subfield":{"id":"https://openalex.org/subfields/3205","display_name":"Experimental and Cognitive Psychology"},"field":{"id":"https://openalex.org/fields/32","display_name":"Psychology"},"domain":{"id":"https://openalex.org/domains/2","display_name":"Social Sciences"}},{"id":"https://openalex.org/T10860","display_name":"Speech and Audio Processing","score":0.6259999871253967,"subfield":{"id":"https://openalex.org/subfields/1711","display_name":"Signal Processing"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}},{"id":"https://openalex.org/T10201","display_name":"Speech Recognition and Synthesis","score":0.6187000274658203,"subfield":{"id":"https://openalex.org/subfields/1702","display_name":"Artificial Intelligence"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}}],"keywords":[{"id":"https://openalex.org/keywords/speech-recognition","display_name":"Speech recognition","score":0.6991788148880005},{"id":"https://openalex.org/keywords/emotion-recognition","display_name":"Emotion recognition","score":0.6839295029640198},{"id":"https://openalex.org/keywords/class","display_name":"Class (philosophy)","score":0.6496144533157349},{"id":"https://openalex.org/keywords/computer-science","display_name":"Computer science","score":0.6260904669761658},{"id":"https://openalex.org/keywords/natural-language-processing","display_name":"Natural language processing","score":0.4820769429206848},{"id":"https://openalex.org/keywords/artificial-intelligence","display_name":"Artificial intelligence","score":0.4212990701198578},{"id":"https://openalex.org/keywords/pattern-recognition","display_name":"Pattern recognition (psychology)","score":0.36188018321990967}],"concepts":[{"id":"https://openalex.org/C28490314","wikidata":"https://www.wikidata.org/wiki/Q189436","display_name":"Speech recognition","level":1,"score":0.6991788148880005},{"id":"https://openalex.org/C2777438025","wikidata":"https://www.wikidata.org/wiki/Q1339090","display_name":"Emotion recognition","level":2,"score":0.6839295029640198},{"id":"https://openalex.org/C2777212361","wikidata":"https://www.wikidata.org/wiki/Q5127848","display_name":"Class (philosophy)","level":2,"score":0.6496144533157349},{"id":"https://openalex.org/C41008148","wikidata":"https://www.wikidata.org/wiki/Q21198","display_name":"Computer science","level":0,"score":0.6260904669761658},{"id":"https://openalex.org/C204321447","wikidata":"https://www.wikidata.org/wiki/Q30642","display_name":"Natural language processing","level":1,"score":0.4820769429206848},{"id":"https://openalex.org/C154945302","wikidata":"https://www.wikidata.org/wiki/Q11660","display_name":"Artificial intelligence","level":1,"score":0.4212990701198578},{"id":"https://openalex.org/C153180895","wikidata":"https://www.wikidata.org/wiki/Q7148389","display_name":"Pattern recognition (psychology)","level":2,"score":0.36188018321990967}],"mesh":[],"locations_count":2,"locations":[{"id":"doi:10.21437/interspeech.2023-1163","is_oa":false,"landing_page_url":"http://dx.doi.org/10.21437/interspeech.2023-1163","pdf_url":null,"source":null,"license":null,"license_id":null,"version":"publishedVersion","is_accepted":true,"is_published":true,"raw_source_name":"INTERSPEECH 2023","raw_type":"proceedings-article"},{"id":"pmh:oai:irdb.nii.ac.jp:01038:0006909553","is_oa":false,"landing_page_url":"https://ritsumei.repo.nii.ac.jp/records/2004025","pdf_url":null,"source":{"id":"https://openalex.org/S7407056385","display_name":"Institutional Repositories DataBase (IRDB)","issn_l":null,"issn":null,"is_oa":false,"is_in_doaj":false,"is_core":false,"host_organization":"https://openalex.org/I184597095","host_organization_name":"National Institute of Informatics","host_organization_lineage":["https://openalex.org/I184597095"],"host_organization_lineage_names":[],"type":"repository"},"license":null,"license_id":null,"version":"publishedVersion","is_accepted":true,"is_published":true,"raw_source_name":"INTERSPEECH 2023","raw_type":"conference paper"}],"best_oa_location":null,"sustainable_development_goals":[],"awards":[],"funders":[],"has_content":{"grobid_xml":false,"pdf":false},"content_urls":null,"referenced_works_count":0,"referenced_works":[],"related_works":["https://openalex.org/W2368454205","https://openalex.org/W2536562190","https://openalex.org/W2370467235","https://openalex.org/W2989824750","https://openalex.org/W2347925354","https://openalex.org/W4238050384","https://openalex.org/W1921169094","https://openalex.org/W366410996","https://openalex.org/W3126677997","https://openalex.org/W1610857240"],"abstract_inverted_index":{"In":[0],"recent":[1],"years,":[2],"much":[3],"research":[4],"has":[5],"been":[6],"into":[7],"speech":[8],"emotion":[9,28,87],"recognition":[10],"(SER)":[11],"using":[12,33,92],"deep":[13],"learning":[14],"to":[15,134],"predict":[16],"emotions":[17,39],"conveyed":[18],"by":[19],"speech.":[20],"We":[21],"studied":[22],"the":[23,27,30,34,51,58,67,86,89,97,119,124,127,130],"method":[24,55],"that":[25,57,95,118],"detected":[26],"for":[29,88,129],"whole":[31,47,90,131],"utterance":[32,91,132],"frame-based":[35,68,93],"SER,":[36,69],"which":[37,62],"estimates":[38],"in":[40,45,65,111],"each":[41],"frame":[42],"rather":[43],"than":[44],"a":[46,114],"utterance.":[48],"One":[49],"of":[50,84,126],"problems":[52],"with":[53],"this":[54,78],"is":[56,63],"emotional":[59],"label":[60],"sequence,":[61],"used":[64],"training":[66],"does":[70],"not":[71],"sufficiently":[72],"consider":[73],"phonemic":[74],"characteristics.":[75],"To":[76],"solve":[77],"problem,":[79],"we":[80,116],"propose":[81],"new":[82],"methods":[83,121],"recognizing":[85],"SER":[94],"considers":[96],"phoneme":[98],"class":[99],"attribute":[100],"such":[101],"as":[102],"vowels,":[103],"voiced":[104],"consonants,":[105,107],"unvoiced":[106],"and":[108],"other":[109],"symbols":[110],"training.":[112],"As":[113],"result,":[115],"found":[117],"proposed":[120],"significantly":[122],"improve":[123],"performance":[125],"result":[128],"compared":[133],"conventional":[135],"methods.":[136]},"counts_by_year":[],"updated_date":"2026-04-04T16:13:02.066488","created_date":"2025-10-10T00:00:00"}
