{"id":"https://openalex.org/W4223587462","doi":"https://doi.org/10.21437/interspeech.2022-926","title":"Reliable Visualization for Deep Speaker Recognition","display_name":"Reliable Visualization for Deep Speaker Recognition","publication_year":2022,"publication_date":"2022-09-16","ids":{"openalex":"https://openalex.org/W4223587462","doi":"https://doi.org/10.21437/interspeech.2022-926"},"language":"en","primary_location":{"id":"doi:10.21437/interspeech.2022-926","is_oa":false,"landing_page_url":"https://doi.org/10.21437/interspeech.2022-926","pdf_url":null,"source":{"id":"https://openalex.org/S4363604309","display_name":"Interspeech 2022","issn_l":null,"issn":null,"is_oa":false,"is_in_doaj":false,"is_core":false,"host_organization":null,"host_organization_name":null,"host_organization_lineage":[],"host_organization_lineage_names":[],"type":"conference"},"license":null,"license_id":null,"version":"publishedVersion","is_accepted":true,"is_published":true,"raw_source_name":"Interspeech 2022","raw_type":"proceedings-article"},"type":"article","indexed_in":["crossref"],"open_access":{"is_oa":false,"oa_status":"closed","oa_url":null,"any_repository_has_fulltext":false},"authorships":[{"author_position":"first","author":{"id":"https://openalex.org/A5039566915","display_name":"Pengqi Li","orcid":null},"institutions":[],"countries":[],"is_corresponding":true,"raw_author_name":"Pengqi Li","raw_affiliation_strings":[],"affiliations":[]},{"author_position":"middle","author":{"id":"https://openalex.org/A5050701255","display_name":"Lantian Li","orcid":"https://orcid.org/0000-0003-4274-7930"},"institutions":[],"countries":[],"is_corresponding":false,"raw_author_name":"Lantian Li","raw_affiliation_strings":[],"affiliations":[]},{"author_position":"middle","author":{"id":"https://openalex.org/A5075133420","display_name":"Askar Hamdulla","orcid":"https://orcid.org/0000-0002-2321-308X"},"institutions":[],"countries":[],"is_corresponding":false,"raw_author_name":"Askar Hamdulla","raw_affiliation_strings":[],"affiliations":[]},{"author_position":"last","author":{"id":"https://openalex.org/A5100391494","display_name":"Dong Wang","orcid":"https://orcid.org/0000-0002-6992-7950"},"institutions":[],"countries":[],"is_corresponding":false,"raw_author_name":"Dong Wang","raw_affiliation_strings":[],"affiliations":[]}],"institutions":[],"countries_distinct_count":0,"institutions_distinct_count":4,"corresponding_author_ids":["https://openalex.org/A5039566915"],"corresponding_institution_ids":[],"apc_list":null,"apc_paid":null,"fwci":0.7271,"has_fulltext":false,"cited_by_count":7,"citation_normalized_percentile":{"value":0.69580909,"is_in_top_1_percent":false,"is_in_top_10_percent":false},"cited_by_percentile_year":{"min":94,"max":97},"biblio":{"volume":null,"issue":null,"first_page":null,"last_page":null},"is_retracted":false,"is_paratext":false,"is_xpac":false,"primary_topic":{"id":"https://openalex.org/T10201","display_name":"Speech Recognition and Synthesis","score":0.9986000061035156,"subfield":{"id":"https://openalex.org/subfields/1702","display_name":"Artificial Intelligence"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}},"topics":[{"id":"https://openalex.org/T10201","display_name":"Speech Recognition and Synthesis","score":0.9986000061035156,"subfield":{"id":"https://openalex.org/subfields/1702","display_name":"Artificial Intelligence"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}},{"id":"https://openalex.org/T11309","display_name":"Music and Audio Processing","score":0.995199978351593,"subfield":{"id":"https://openalex.org/subfields/1711","display_name":"Signal Processing"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}},{"id":"https://openalex.org/T10181","display_name":"Natural Language Processing Techniques","score":0.989799976348877,"subfield":{"id":"https://openalex.org/subfields/1702","display_name":"Artificial Intelligence"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}}],"keywords":[{"id":"https://openalex.org/keywords/computer-science","display_name":"Computer science","score":0.8103487491607666},{"id":"https://openalex.org/keywords/visualization","display_name":"Visualization","score":0.7016274333000183},{"id":"https://openalex.org/keywords/speaker-recognition","display_name":"Speaker recognition","score":0.5962214469909668},{"id":"https://openalex.org/keywords/speech-recognition","display_name":"Speech recognition","score":0.5638254880905151},{"id":"https://openalex.org/keywords/artificial-intelligence","display_name":"Artificial intelligence","score":0.45520973205566406}],"concepts":[{"id":"https://openalex.org/C41008148","wikidata":"https://www.wikidata.org/wiki/Q21198","display_name":"Computer science","level":0,"score":0.8103487491607666},{"id":"https://openalex.org/C36464697","wikidata":"https://www.wikidata.org/wiki/Q451553","display_name":"Visualization","level":2,"score":0.7016274333000183},{"id":"https://openalex.org/C133892786","wikidata":"https://www.wikidata.org/wiki/Q1145189","display_name":"Speaker recognition","level":2,"score":0.5962214469909668},{"id":"https://openalex.org/C28490314","wikidata":"https://www.wikidata.org/wiki/Q189436","display_name":"Speech recognition","level":1,"score":0.5638254880905151},{"id":"https://openalex.org/C154945302","wikidata":"https://www.wikidata.org/wiki/Q11660","display_name":"Artificial intelligence","level":1,"score":0.45520973205566406}],"mesh":[],"locations_count":1,"locations":[{"id":"doi:10.21437/interspeech.2022-926","is_oa":false,"landing_page_url":"https://doi.org/10.21437/interspeech.2022-926","pdf_url":null,"source":{"id":"https://openalex.org/S4363604309","display_name":"Interspeech 2022","issn_l":null,"issn":null,"is_oa":false,"is_in_doaj":false,"is_core":false,"host_organization":null,"host_organization_name":null,"host_organization_lineage":[],"host_organization_lineage_names":[],"type":"conference"},"license":null,"license_id":null,"version":"publishedVersion","is_accepted":true,"is_published":true,"raw_source_name":"Interspeech 2022","raw_type":"proceedings-article"}],"best_oa_location":null,"sustainable_development_goals":[{"display_name":"Quality Education","score":0.5799999833106995,"id":"https://metadata.un.org/sdg/4"}],"awards":[],"funders":[],"has_content":{"pdf":false,"grobid_xml":false},"content_urls":null,"referenced_works_count":0,"referenced_works":[],"related_works":["https://openalex.org/W1549056443","https://openalex.org/W1493022169","https://openalex.org/W3107474891","https://openalex.org/W2363669182","https://openalex.org/W2525150146","https://openalex.org/W1491159402","https://openalex.org/W2893763841","https://openalex.org/W2312116756","https://openalex.org/W2778699561","https://openalex.org/W4252026381"],"abstract_inverted_index":{"In":[0,78],"spite":[1],"of":[2,6,74],"the":[3,55,64,72,115],"impressive":[4],"success":[5],"convolutional":[7],"neural":[8],"networks":[9],"(CNNs)":[10],"in":[11,54,143],"speaker":[12,46,103,134],"recognition,":[13],"our":[14,144],"understanding":[15],"to":[16,34,62,70,98,131],"CNNs'":[17],"internal":[18],"functions":[19],"is":[20,26,44],"still":[21],"limited.":[22],"A":[23],"major":[24],"obstacle":[25],"that":[27,45,114],"some":[28],"popular":[29,88],"visualization":[30,65,76,89],"tools":[31],"are":[32,141],"difficult":[33],"apply,":[35],"for":[36,102],"example":[37],"those":[38],"producing":[39],"saliency":[40],"maps.":[41],"The":[42,136],"reason":[43],"information":[47],"does":[48],"not":[49],"show":[50,113],"clear":[51],"spatial":[52],"patterns":[53],"temporal-frequency":[56],"space,":[57],"which":[58],"makes":[59],"it":[60],"hard":[61,69],"interpret":[63],"results,":[66],"and":[67,96,122,139],"hence":[68],"confirm":[71],"reliability":[73,101],"a":[75,109,128],"tool.":[77],"this":[79],"paper,":[80],"we":[81],"conduct":[82],"an":[83],"extensive":[84],"analysis":[85],"on":[86,92,108],"three":[87],"methods":[90],"based":[91],"CAM:":[93],"Grad-CAM,":[94],"Score-CAM":[95],"Layer-CAM,":[97],"investigate":[99],"their":[100],"recognition":[104],"tasks.":[105],"Experiments":[106],"conducted":[107],"state-of-the-art":[110],"ResNet34SE":[111],"model":[112],"Layer-CAM":[116],"algorithm":[117],"can":[118,124],"produce":[119],"reliable":[120],"visualization,":[121],"thus":[123],"be":[125],"used":[126],"as":[127],"promising":[129],"tool":[130],"explain":[132],"CNN-based":[133],"models.":[135],"source":[137],"code":[138],"examples":[140],"available":[142],"project":[145],"page:":[146],"http://project.cslt.org/.":[147]},"counts_by_year":[{"year":2025,"cited_by_count":2},{"year":2024,"cited_by_count":3},{"year":2023,"cited_by_count":2}],"updated_date":"2025-11-06T03:46:38.306776","created_date":"2025-10-10T00:00:00"}
