{"id":"https://openalex.org/W4405633702","doi":"https://doi.org/10.1109/o-cocosda64382.2024.10800102","title":"A Feedback-Driven Self-Improvement Strategy and Emotion-Aware Vocoder for Emotional Voice Conversion","display_name":"A Feedback-Driven Self-Improvement Strategy and Emotion-Aware Vocoder for Emotional Voice Conversion","publication_year":2024,"publication_date":"2024-10-17","ids":{"openalex":"https://openalex.org/W4405633702","doi":"https://doi.org/10.1109/o-cocosda64382.2024.10800102"},"language":"en","primary_location":{"id":"doi:10.1109/o-cocosda64382.2024.10800102","is_oa":false,"landing_page_url":"https://doi.org/10.1109/o-cocosda64382.2024.10800102","pdf_url":null,"source":null,"license":null,"license_id":null,"version":"publishedVersion","is_accepted":true,"is_published":true,"raw_source_name":"2024 27th Conference of the Oriental COCOSDA International Committee for the Co-ordination and Standardisation of Speech Databases and Assessment Techniques (O-COCOSDA)","raw_type":"proceedings-article"},"type":"article","indexed_in":["crossref"],"open_access":{"is_oa":false,"oa_status":"closed","oa_url":null,"any_repository_has_fulltext":false},"authorships":[{"author_position":"first","author":{"id":"https://openalex.org/A5101798383","display_name":"Zhihan Zhang","orcid":"https://orcid.org/0009-0005-4156-5483"},"institutions":[{"id":"https://openalex.org/I177738480","display_name":"Japan Advanced Institute of Science and Technology","ror":"https://ror.org/03frj4r98","country_code":"JP","type":"education","lineage":["https://openalex.org/I177738480"]}],"countries":["JP"],"is_corresponding":true,"raw_author_name":"Zhanhang Zhang","raw_affiliation_strings":["Japan Advanced Institute of Science and Technology,Ishikawa,Japan"],"affiliations":[{"raw_affiliation_string":"Japan Advanced Institute of Science and Technology,Ishikawa,Japan","institution_ids":["https://openalex.org/I177738480"]}]},{"author_position":"last","author":{"id":"https://openalex.org/A5040108974","display_name":"Sakriani Sakti","orcid":"https://orcid.org/0000-0001-5509-8963"},"institutions":[{"id":"https://openalex.org/I177738480","display_name":"Japan Advanced Institute of Science and Technology","ror":"https://ror.org/03frj4r98","country_code":"JP","type":"education","lineage":["https://openalex.org/I177738480"]}],"countries":["JP"],"is_corresponding":false,"raw_author_name":"Sakriani Sakti","raw_affiliation_strings":["Japan Advanced Institute of Science and Technology,Ishikawa,Japan"],"affiliations":[{"raw_affiliation_string":"Japan Advanced Institute of Science and Technology,Ishikawa,Japan","institution_ids":["https://openalex.org/I177738480"]}]}],"institutions":[],"countries_distinct_count":1,"institutions_distinct_count":2,"corresponding_author_ids":["https://openalex.org/A5101798383"],"corresponding_institution_ids":["https://openalex.org/I177738480"],"apc_list":null,"apc_paid":null,"fwci":0.0,"has_fulltext":false,"cited_by_count":0,"citation_normalized_percentile":{"value":0.21750923,"is_in_top_1_percent":false,"is_in_top_10_percent":false},"cited_by_percentile_year":null,"biblio":{"volume":null,"issue":null,"first_page":"1","last_page":"6"},"is_retracted":false,"is_paratext":false,"is_xpac":false,"primary_topic":{"id":"https://openalex.org/T10201","display_name":"Speech Recognition and Synthesis","score":0.8525999784469604,"subfield":{"id":"https://openalex.org/subfields/1702","display_name":"Artificial Intelligence"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}},"topics":[{"id":"https://openalex.org/T10201","display_name":"Speech Recognition and Synthesis","score":0.8525999784469604,"subfield":{"id":"https://openalex.org/subfields/1702","display_name":"Artificial Intelligence"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}},{"id":"https://openalex.org/T10860","display_name":"Speech and Audio Processing","score":0.810699999332428,"subfield":{"id":"https://openalex.org/subfields/1711","display_name":"Signal Processing"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}},{"id":"https://openalex.org/T12031","display_name":"Speech and dialogue systems","score":0.7968000173568726,"subfield":{"id":"https://openalex.org/subfields/1702","display_name":"Artificial Intelligence"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}}],"keywords":[{"id":"https://openalex.org/keywords/computer-science","display_name":"Computer science","score":0.6612020134925842},{"id":"https://openalex.org/keywords/speech-recognition","display_name":"Speech recognition","score":0.5962600708007812}],"concepts":[{"id":"https://openalex.org/C41008148","wikidata":"https://www.wikidata.org/wiki/Q21198","display_name":"Computer science","level":0,"score":0.6612020134925842},{"id":"https://openalex.org/C28490314","wikidata":"https://www.wikidata.org/wiki/Q189436","display_name":"Speech recognition","level":1,"score":0.5962600708007812}],"mesh":[],"locations_count":1,"locations":[{"id":"doi:10.1109/o-cocosda64382.2024.10800102","is_oa":false,"landing_page_url":"https://doi.org/10.1109/o-cocosda64382.2024.10800102","pdf_url":null,"source":null,"license":null,"license_id":null,"version":"publishedVersion","is_accepted":true,"is_published":true,"raw_source_name":"2024 27th Conference of the Oriental COCOSDA International Committee for the Co-ordination and Standardisation of Speech Databases and Assessment Techniques (O-COCOSDA)","raw_type":"proceedings-article"}],"best_oa_location":null,"sustainable_development_goals":[],"awards":[],"funders":[],"has_content":{"grobid_xml":false,"pdf":false},"content_urls":null,"referenced_works_count":24,"referenced_works":["https://openalex.org/W2077801020","https://openalex.org/W2118774185","https://openalex.org/W2511640485","https://openalex.org/W2747874407","https://openalex.org/W2897353073","https://openalex.org/W2899877258","https://openalex.org/W2962699523","https://openalex.org/W2963470893","https://openalex.org/W2973138167","https://openalex.org/W2996414377","https://openalex.org/W3015241559","https://openalex.org/W3025680351","https://openalex.org/W3096939667","https://openalex.org/W3150572638","https://openalex.org/W3197993066","https://openalex.org/W4205742757","https://openalex.org/W4221147462","https://openalex.org/W4289913100","https://openalex.org/W6763832098","https://openalex.org/W6765779288","https://openalex.org/W6772349387","https://openalex.org/W6778823374","https://openalex.org/W6783867762","https://openalex.org/W6795288823"],"related_works":["https://openalex.org/W4391375266","https://openalex.org/W2899084033","https://openalex.org/W2748952813","https://openalex.org/W2390279801","https://openalex.org/W4391913857","https://openalex.org/W2358668433","https://openalex.org/W4396701345","https://openalex.org/W2376932109","https://openalex.org/W2001405890","https://openalex.org/W4396696052"],"abstract_inverted_index":{"Emotional":[0],"voice":[1],"conversion":[2],"(EVC)":[3],"transforms":[4],"the":[5,59,65,86],"emotional":[6,57,127],"state":[7],"of":[8,67,140],"speech":[9],"while":[10],"preserving":[11],"linguistic":[12],"content":[13],"and":[14,37,50,98,112,119,125,145],"speaker":[15],"identity.":[16],"Although":[17],"sequence-to-sequence":[18],"models":[19],"have":[20],"achieved":[21],"significant":[22],"success":[23,66],"with":[24],"EVC":[25,61,87],"for":[26,34,55],"handling":[27],"limited":[28,126],"non-parallel":[29,124],
"data,":[30],"they":[31],"lack":[32],"mechanisms":[33],"automatically":[35],"evaluating":[36],"improving":[38],"their":[39,43,52,68],"own":[40,53],"performance,":[41],"limiting":[42],"potential.":[44],"Unlike":[45],"humans":[46],"who":[47],"can":[48],"hear":[49],"self-assess":[51],"voices":[54],"better":[56],"expression,":[58,142],"existing":[60,135],"systems":[62],"cannot":[63],"evaluate":[64],"con-versions":[69],"or":[70],"improve":[71],"themselves":[72],"accordingly.":[73],"To":[74],"address":[75],"this":[76,131],"gap,":[77],"we":[78],"propose":[79],"a":[80,92,113],"novel":[81],"feedback-driven":[82],"self-improvement":[83],"mechanism":[84,90],"within":[85],"framework.":[88],"This":[89],"allows":[91],"system":[93],"to":[94],"assess":[95],"its":[96,101],"performance":[97,106],"iteratively":[99],"refine":[100],"outputs.":[102],"We":[103],"further":[104],"enhance":[105],"by":[107],"introducing":[108],"an":[109],"emotion-aware":[110],"vocoder":[111],"differentiable":[114],"prosodic":[115],"predictor.":[116],"Our":[117],"objective":[118],"subjective":[120],"evaluations,":[121],"conducted":[122],"using":[123],"datasets,":[128],"demonstrate":[129],"that":[130],"innovative":[132],"framework":[133],"outperforms":[134],"state-of-the-art":[136],"approaches":[137],"in":[138],"terms":[139],"emotion":[141],"audio":[143],"quality,":[144],"inference":[146],"speed.":[147]},"counts_by_year":[],"updated_date":"2025-11-06T03:46:38.306776","created_date":"2025-10-10T00:00:00"}
