{"id":"https://openalex.org/W4402980141","doi":"https://doi.org/10.1109/icme57554.2024.10687583","title":"Skipformer: A Skip-and-Recover Strategy for Efficient Speech Recognition","display_name":"Skipformer: A Skip-and-Recover Strategy for Efficient Speech Recognition","publication_year":2024,"publication_date":"2024-07-15","ids":{"openalex":"https://openalex.org/W4402980141","doi":"https://doi.org/10.1109/icme57554.2024.10687583"},"language":"en","primary_location":{"id":"doi:10.1109/icme57554.2024.10687583","is_oa":false,"landing_page_url":"http://dx.doi.org/10.1109/icme57554.2024.10687583","pdf_url":null,"source":null,"license":null,"license_id":null,"version":"publishedVersion","is_accepted":true,"is_published":true,"raw_source_name":"2024 IEEE International Conference on Multimedia and Expo (ICME)","raw_type":"proceedings-article"},"type":"article","indexed_in":["crossref"],"open_access":{"is_oa":false,"oa_status":"closed","oa_url":null,"any_repository_has_fulltext":false},"authorships":[{"author_position":"first","author":{"id":"https://openalex.org/A5013263371","display_name":"Wenjing Zhu","orcid":null},"institutions":[{"id":"https://openalex.org/I862669128","display_name":"Xiaomi (China)","ror":"https://ror.org/029f7bn57","country_code":"CN","type":"company","lineage":["https://openalex.org/I862669128"]}],"countries":["CN"],"is_corresponding":true,"raw_author_name":"Wenjing Zhu","raw_affiliation_strings":["Du Xiaoman AI Lab,Beijing,China"],"affiliations":[{"raw_affiliation_string":"Du Xiaoman AI Lab,Beijing,China","institution_ids":["https://openalex.org/I862669128"]}]},{"author_position":"middle","author":{"id":"https://openalex.org/A5102880200","display_name":"Sining Sun","orcid":"https://orcid.org/0000-0002-2642-5096"},"institutions":[{"id":"https://openalex.org/I862669128","display_name":"Xiaomi (China)","ror":"https://ror.org/029f7bn57","country_code":"CN","type":"company","lineage":["https://openalex.org/I862669128"]}],"countries":["CN"],"is_corresponding":false,"raw_author_name":"Sining Sun","raw_affiliation_strings":["Du Xiaoman AI Lab,Beijing,China"],"affiliations":[{"raw_affiliation_string":"Du Xiaoman AI Lab,Beijing,China","institution_ids":["https://openalex.org/I862669128"]}]},{"author_position":"middle","author":{"id":"https://openalex.org/A5042775692","display_name":"Changhao Shan","orcid":null},"institutions":[{"id":"https://openalex.org/I862669128","display_name":"Xiaomi (China)","ror":"https://ror.org/029f7bn57","country_code":"CN","type":"company","lineage":["https://openalex.org/I862669128"]}],"countries":["CN"],"is_corresponding":false,"raw_author_name":"Changhao Shan","raw_affiliation_strings":["Du Xiaoman AI Lab,Beijing,China"],"affiliations":[{"raw_affiliation_string":"Du Xiaoman AI Lab,Beijing,China","institution_ids":["https://openalex.org/I862669128"]}]},{"author_position":"middle","author":{"id":"https://openalex.org/A5101880046","display_name":"Peng Fan","orcid":"https://orcid.org/0000-0002-0801-1893"},"institutions":[{"id":"https://openalex.org/I862669128","display_name":"Xiaomi (China)","ror":"https://ror.org/029f7bn57","country_code":"CN","type":"company","lineage":["https://openalex.org/I862669128"]}],"countries":["CN"],"is_corresponding":false,"raw_author_name":"Peng Fan","raw_affiliation_strings":["Du Xiaoman AI Lab,Beijing,China"],"affiliations":[{"raw_affiliation_string":"Du Xiaoman AI Lab,Beijing,China","institution_ids":["https://openalex.org/I862669128"]}]},{"author_position":"last","author":{"id":"https://openalex.org/A5072334342","display_name":"Qing Yang","orcid":"https://orcid.org/0000-0002-0833-8204"},"institutions":[{"id":"https://openalex.org/I862669128","display_name":"Xiaomi (China)","ror":"https://ror.org/029f7bn57","country_code":"CN","type":"company","lineage":["https://openalex.org/I862669128"]}],"countries":["CN"],"is_corresponding":false,"raw_author_name":"Qing Yang","raw_affiliation_strings":["Du Xiaoman AI Lab,Beijing,China"],"affiliations":[{"raw_affiliation_string":"Du Xiaoman AI Lab,Beijing,China","institution_ids":["https://openalex.org/I862669128"]}]}],"institutions":[],"countries_distinct_count":1,"institutions_distinct_count":5,"corresponding_author_ids":["https://openalex.org/A5013263371"],"corresponding_institution_ids":["https://openalex.org/I862669128"],"apc_list":null,"apc_paid":null,"fwci":0.0,"has_fulltext":false,"cited_by_count":0,"citation_normalized_percentile":{"value":0.18379912,"is_in_top_1_percent":false,"is_in_top_10_percent":false},"cited_by_percentile_year":null,"biblio":{"volume":null,"issue":null,"first_page":"1","last_page":"6"},"is_retracted":false,"is_paratext":false,"is_xpac":false,"primary_topic":{"id":"https://openalex.org/T10860","display_name":"Speech and Audio Processing","score":0.9508000016212463,"subfield":{"id":"https://openalex.org/subfields/1711","display_name":"Signal Processing"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}},"topics":[{"id":"https://openalex.org/T10860","display_name":"Speech and Audio Processing","score":0.9508000016212463,"subfield":{"id":"https://openalex.org/subfields/1711","display_name":"Signal Processing"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}},{"id":"https://openalex.org/T10201","display_name":"Speech Recognition and Synthesis","score":0.9483000040054321,"subfield":{"id":"https://openalex.org/subfields/1702","display_name":"Artificial Intelligence"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}}],"keywords":[{"id":"https://openalex.org/keywords/speech-recognition","display_name":"Speech recognition","score":0.7716072797775269},{"id":"https://openalex.org/keywords/computer-science","display_name":"Computer science","score":0.7189956903457642},{"id":"https://openalex.org/keywords/artificial-intelligence","display_name":"Artificial intelligence","score":0.3282877802848816}],"concepts":[{"id":"https://openalex.org/C28490314","wikidata":"https://www.wikidata.org/wiki/Q189436","display_name":"Speech recognition","level":1,"score":0.7716072797775269},{"id":"https://openalex.org/C41008148","wikidata":"https://www.wikidata.org/wiki/Q21198","display_name":"Computer science","level":0,"score":0.7189956903457642},{"id":"https://openalex.org/C154945302","wikidata":"https://www.wikidata.org/wiki/Q11660","display_name":"Artificial intelligence","level":1,"score":0.3282877802848816}],"mesh":[],"locations_count":1,"locations":[{"id":"doi:10.1109/icme57554.2024.10687583","is_oa":false,"landing_page_url":"http://dx.doi.org/10.1109/icme57554.2024.10687583","pdf_url":null,"source":null,"license":null,"license_id":null,"version":"publishedVersion","is_accepted":true,"is_published":true,"raw_source_name":"2024 IEEE International Conference on Multimedia and Expo (ICME)","raw_type":"proceedings-article"}],"best_oa_location":null,"sustainable_development_goals":[],"awards":[],"funders":[],"has_content":{"pdf":false,"grobid_xml":false},"content_urls":null,"referenced_works_count":0,"referenced_works":[],"related_works":["https://openalex.org/W4391375266","https://openalex.org/W2748952813","https://openalex.org/W2390279801","https://openalex.org/W2358668433","https://openalex.org/W4396701345","https://openalex.org/W2376932109","https://openalex.org/W2001405890","https://openalex.org/W4396696052","https://openalex.org/W2382290278","https://openalex.org/W4395014643"],"abstract_inverted_index":{"Conformer-based":[0],"attention":[1,46],"models":[2],"have":[3],"become":[4],"the":[5,23,34,105,115,131],"de":[6],"facto":[7],"backbone":[8],"model":[9,113,132],"for":[10,28],"Automatic":[11],"Speech":[12],"Recognition":[13],"tasks.":[14],"A":[15],"blank":[16],"symbol":[17],"is":[18,148],"usually":[19],"introduced":[20],"to":[21,59,75],"align":[22],"input":[24,36,62,116],"and":[25,41,65,83,93,124,138],"output":[26,72,95],"sequences":[27],"CTC":[29,71],"or":[30],"RNN-T":[31],"models.":[32,145],"Unfortunately,":[33],"long":[35],"length":[37,63,118],"overloads":[38],"computational":[39],"budget":[40],"memory":[42],"consumption":[43],"quadratically":[44],"by":[45,100,119],"mechanism.":[47],"In":[48],"this":[49],"work,":[50],"we":[51],"propose":[52],"a":[53],"\"Skip-and-Recover\"":[54],"Conformer":[55],"architecture,":[56],"named":[57],"Skipformer,":[58],"squeeze":[60],"sequence":[61,117],"dynamically":[64],"inhomogeneously.":[66],"Skipformer":[67],"uses":[68],"an":[69],"intermediate":[70],"as":[73,104],"criteria":[74],"split":[76],"frames":[77],"into":[78,89],"three":[79],"groups:":[80],"crucial,":[81],"skipping":[82,98],"ignoring.":[84],"The":[85],"crucial":[86],"group":[87,99],"feeds":[88],"next":[90],"conformer":[91],"blocks":[92],"its":[94],"joint":[96],"with":[97],"original":[101],"temporal":[102],"order":[103],"final":[106],"encoder":[107],"output.":[108],"Experiments":[109],"show":[110],"that":[111],"our":[112],"reduces":[114],"31":[120],"times":[121,126],"on":[122,127],"Aishell-1":[123],"22":[125],"Librispeech":[128],"corpus.":[129],"Meanwhile,":[130],"can":[133],"achieve":[134],"better":[135],"recognition":[136],"accuracy":[137],"faster":[139],"inference":[140],"speed":[141],"than":[142],"recent":[143],"baseline":[144],"Our":[146],"code":[147],"available":[149],"at":[150],"https://github.com/Duxiaoman-DI/public-achievements-on-Speech/tree/skipformer.":[151]},"counts_by_year":[],"updated_date":"2025-12-27T23:08:20.325037","created_date":"2025-10-10T00:00:00"}
