{"id":"https://openalex.org/W7127496937","doi":"https://doi.org/10.48550/arxiv.2602.02260","title":"Learning Markov Decision Processes under Fully Bandit Feedback","display_name":"Learning Markov Decision Processes under Fully Bandit Feedback","publication_year":2026,"publication_date":"2026-02-02","ids":{"openalex":"https://openalex.org/W7127496937","doi":"https://doi.org/10.48550/arxiv.2602.02260"},"language":null,
"primary_location":{"id":"pmh:doi:10.48550/arxiv.2602.02260","is_oa":true,"landing_page_url":null,"pdf_url":null,"source":{"id":"https://openalex.org/S4406922384","display_name":"Open MIND","issn_l":null,"issn":null,"is_oa":false,"is_in_doaj":false,"is_core":false,"host_organization":null,"host_organization_name":null,"host_organization_lineage":[],"host_organization_lineage_names":[],"type":"repository"},"license":"publisher-specific-oa","license_id":"https://openalex.org/licenses/publisher-specific-oa","version":"submittedVersion","is_accepted":false,"is_published":false,"raw_source_name":null,"raw_type":"Article"},
"type":"preprint","indexed_in":["datacite"],"open_access":{"is_oa":true,"oa_status":"green","oa_url":null,"any_repository_has_fulltext":true},
"authorships":[{"author_position":"first","author":{"id":"https://openalex.org/A5110984750","display_name":"Zhengjia Zhuo","orcid":null},"institutions":[],"countries":[],"is_corresponding":true,"raw_author_name":"Zhuo, Zhengjia","raw_affiliation_strings":[],"affiliations":[]},{"author_position":"middle","author":{"id":"https://openalex.org/A5004167360","display_name":"Anupam Gupta","orcid":null},"institutions":[],"countries":[],"is_corresponding":false,"raw_author_name":"Gupta, Anupam","raw_affiliation_strings":[],"affiliations":[]},{"author_position":"last","author":{"id":null,"display_name":"Viswanath Nagarajan","orcid":null},"institutions":[],"countries":[],"is_corresponding":false,"raw_author_name":"Nagarajan, Viswanath","raw_affiliation_strings":[],"affiliations":[]}],
"institutions":[],"countries_distinct_count":0,"institutions_distinct_count":0,"corresponding_author_ids":["https://openalex.org/A5110984750"],"corresponding_institution_ids":[],"apc_list":null,"apc_paid":null,"fwci":null,"has_fulltext":false,"cited_by_count":0,"citation_normalized_percentile":null,"cited_by_percentile_year":null,"biblio":{"volume":null,"issue":null,"first_page":null,"last_page":null},"is_retracted":false,"is_paratext":false,"is_xpac":false,
"primary_topic":{"id":"https://openalex.org/T12101","display_name":"Advanced Bandit Algorithms Research","score":0.878000020980835,"subfield":{"id":"https://openalex.org/subfields/1803","display_name":"Management Science and Operations Research"},"field":{"id":"https://openalex.org/fields/18","display_name":"Decision Sciences"},"domain":{"id":"https://openalex.org/domains/2","display_name":"Social Sciences"}},
"topics":[{"id":"https://openalex.org/T12101","display_name":"Advanced Bandit Algorithms Research","score":0.878000020980835,"subfield":{"id":"https://openalex.org/subfields/1803","display_name":"Management Science and Operations Research"},"field":{"id":"https://openalex.org/fields/18","display_name":"Decision Sciences"},"domain":{"id":"https://openalex.org/domains/2","display_name":"Social Sciences"}},{"id":"https://openalex.org/T10462","display_name":"Reinforcement Learning in Robotics","score":0.0778999999165535,"subfield":{"id":"https://openalex.org/subfields/1702","display_name":"Artificial Intelligence"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}},{"id":"https://openalex.org/T13553","display_name":"Age of Information Optimization","score":0.018200000748038292,"subfield":{"id":"https://openalex.org/subfields/1705","display_name":"Computer Networks and Communications"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}}],
"keywords":[{"id":"https://openalex.org/keywords/regret","display_name":"Regret","score":0.8460000157356262},{"id":"https://openalex.org/keywords/markov-decision-process","display_name":"Markov decision process","score":0.7985000014305115},{"id":"https://openalex.org/keywords/reinforcement-learning","display_name":"Reinforcement learning","score":0.6406999826431274},{"id":"https://openalex.org/keywords/markov-process","display_name":"Markov process","score":0.5110999941825867},{"id":"https://openalex.org/keywords/markov-chain","display_name":"Markov chain","score":0.4860999882221222},{"id":"https://openalex.org/keywords/trajectory","display_name":"Trajectory","score":0.4578999876976013},{"id":"https://openalex.org/keywords/q-learning","display_name":"Q-learning","score":0.44279998540878296},{"id":"https://openalex.org/keywords/process","display_name":"Process (computing)","score":0.3937000036239624}],
"concepts":[{"id":"https://openalex.org/C50817715","wikidata":"https://www.wikidata.org/wiki/Q79895177","display_name":"Regret","level":2,"score":0.8460000157356262},{"id":"https://openalex.org/C106189395","wikidata":"https://www.wikidata.org/wiki/Q176789","display_name":"Markov decision process","level":3,"score":0.7985000014305115},{"id":"https://openalex.org/C97541855","wikidata":"https://www.wikidata.org/wiki/Q830687","display_name":"Reinforcement learning","level":2,"score":0.6406999826431274},{"id":"https://openalex.org/C41008148","wikidata":"https://www.wikidata.org/wiki/Q21198","display_name":"Computer science","level":0,"score":0.6085000038146973},{"id":"https://openalex.org/C159886148","wikidata":"https://www.wikidata.org/wiki/Q176645","display_name":"Markov process","level":2,"score":0.5110999941825867},{"id":"https://openalex.org/C98763669","wikidata":"https://www.wikidata.org/wiki/Q176645","display_name":"Markov chain","level":2,"score":0.4860999882221222},{"id":"https://openalex.org/C13662910","wikidata":"https://www.wikidata.org/wiki/Q193139","display_name":"Trajectory","level":2,"score":0.4578999876976013},{"id":"https://openalex.org/C154945302","wikidata":"https://www.wikidata.org/wiki/Q11660","display_name":"Artificial intelligence","level":1,"score":0.44690001010894775},{"id":"https://openalex.org/C188116033","wikidata":"https://www.wikidata.org/wiki/Q2664563","display_name":"Q-learning","level":3,"score":0.44279998540878296},{"id":"https://openalex.org/C126255220","wikidata":"https://www.wikidata.org/wiki/Q141495","display_name":"Mathematical optimization","level":1,"score":0.4320000112056732},{"id":"https://openalex.org/C98045186","wikidata":"https://www.wikidata.org/wiki/Q205663","display_name":"Process (computing)","level":2,"score":0.3937000036239624},{"id":"https://openalex.org/C123197309","wikidata":"https://www.wikidata.org/wiki/Q2882343","display_name":"Multi-armed bandit","level":3,"score":0.3873000144958496},{"id":"https://openalex.org/C4679612","wikidata":"https://www.wikidata.org/wiki/Q866298","display_name":"Aggregate (composite)","level":2,"score":0.36070001125335693},{"id":"https://openalex.org/C151376022","wikidata":"https://www.wikidata.org/wiki/Q168698","display_name":"Exponential function","level":2,"score":0.34119999408721924},{"id":"https://openalex.org/C28761237","wikidata":"https://www.wikidata.org/wiki/Q7805321","display_name":"Time horizon","level":2,"score":0.3400000035762787},{"id":"https://openalex.org/C119857082","wikidata":"https://www.wikidata.org/wiki/Q2539","display_name":"Machine learning","level":1,"score":0.30309998989105225},{"id":"https://openalex.org/C112972136","wikidata":"https://www.wikidata.org/wiki/Q7595718","display_name":"Stability (learning theory)","level":2,"score":0.2992999851703644},{"id":"https://openalex.org/C163836022","wikidata":"https://www.wikidata.org/wiki/Q6771326","display_name":"Markov model","level":3,"score":0.2935999929904938},{"id":"https://openalex.org/C8272713","wikidata":"https://www.wikidata.org/wiki/Q176737","display_name":"Stochastic process","level":2,"score":0.2922999858856201},{"id":"https://openalex.org/C33923547","wikidata":"https://www.wikidata.org/wiki/Q395","display_name":"Mathematics","level":0,"score":0.29100000858306885},{"id":"https://openalex.org/C159176650","wikidata":"https://www.wikidata.org/wiki/Q43261","display_name":"Horizon","level":2,"score":0.2849999964237213},{"id":"https://openalex.org/C173801870","wikidata":"https://www.wikidata.org/wiki/Q201413","display_name":"Heuristic","level":2,"score":0.27379998564720154},{"id":"https://openalex.org/C77618280","wikidata":"https://www.wikidata.org/wiki/Q1155772","display_name":"Scheme (mathematics)","level":2,"score":0.2718999981880188},{"id":"https://openalex.org/C196340769","wikidata":"https://www.wikidata.org/wiki/Q7698910","display_name":"Temporal difference learning","level":3,"score":0.26510000228881836},{"id":"https://openalex.org/C11413529","wikidata":"https://www.wikidata.org/wiki/Q8366","display_name":"Algorithm","level":1,"score":0.26010000705718994}],
"mesh":[],"locations_count":2,
"locations":[{"id":"pmh:doi:10.48550/arxiv.2602.02260","is_oa":true,"landing_page_url":null,"pdf_url":null,"source":{"id":"https://openalex.org/S4406922384","display_name":"Open MIND","issn_l":null,"issn":null,"is_oa":false,"is_in_doaj":false,"is_core":false,"host_organization":null,"host_organization_name":null,"host_organization_lineage":[],"host_organization_lineage_names":[],"type":"repository"},"license":"publisher-specific-oa","license_id":"https://openalex.org/licenses/publisher-specific-oa","version":"submittedVersion","is_accepted":false,"is_published":false,"raw_source_name":null,"raw_type":"Article"},{"id":"doi:10.48550/arxiv.2602.02260","is_oa":true,"landing_page_url":"https://doi.org/10.48550/arxiv.2602.02260","pdf_url":null,"source":{"id":"https://openalex.org/S4306400194","display_name":"arXiv (Cornell University)","issn_l":null,"issn":null,"is_oa":true,"is_in_doaj":false,"is_core":false,"host_organization":"https://openalex.org/I205783295","host_organization_name":"Cornell University","host_organization_lineage":["https://openalex.org/I205783295"],"host_organization_lineage_names":[],"type":"repository"},"license":null,"license_id":null,"version":null,"is_accepted":false,"is_published":null,"raw_source_name":null,"raw_type":"article"}],
"best_oa_location":{"id":"pmh:doi:10.48550/arxiv.2602.02260","is_oa":true,"landing_page_url":null,"pdf_url":null,"source":{"id":"https://openalex.org/S4406922384","display_name":"Open MIND","issn_l":null,"issn":null,"is_oa":false,"is_in_doaj":false,"is_core":false,"host_organization":null,"host_organization_name":null,"host_organization_lineage":[],"host_organization_lineage_names":[],"type":"repository"},"license":"publisher-specific-oa","license_id":"https://openalex.org/licenses/publisher-specific-oa","version":"submittedVersion","is_accepted":false,"is_published":false,"raw_source_name":null,"raw_type":"Article"},
"sustainable_development_goals":[{"display_name":"Peace, Justice and strong institutions","score":0.6549501419067383,"id":"https://metadata.un.org/sdg/16"}],"awards":[],"funders":[],"has_content":{"grobid_xml":false,"pdf":false},"content_urls":null,"referenced_works_count":0,"referenced_works":[],"related_works":[],
"abstract_inverted_index":{"A":[0],"standard":[1],"assumption":[2],"in":[3,15,32],"Reinforcement":[4],"Learning":[5],"is":[6,135,190],"that":[7,193],"the":[8,16,24,59,63,90,96,104,109,128,169,176,183],"agent":[9,60,91],"observes":[10,61],"every":[11],"visited":[12,64,97],"state-action":[13,65,98,202],"pair":[14],"associated":[17],"Markov":[18],"Decision":[19],"Process":[20],"(MDP),":[21],"along":[22],"with":[23,118,200],"per-step":[25],"rewards.":[26],"Strong":[27],"theoretical":[28],"results":[29],"are":[30],"known":[31],"this":[33,74],"setting,":[34],"achieving":[35],"nearly-tight":[36,141],"$\u0398(\\sqrt{T})$-regret":[37],"bounds.":[38],"However,":[39],"such":[40,54,157],"detailed":[41,201],"feedback":[42,84],"can":[43,148],"be":[44,149],"unrealistic,":[45],"and":[46,162],"recent":[47],"research":[48],"has":[49,123],"investigated":[50],"more":[51,80],"restricted":[52,185],"settings":[53],"as":[55,158],"trajectory":[56],"feedback,":[57,186],"where":[58,89],"all":[62],"pairs,":[66],"but":[67],"only":[68,102],"a":[69,78,195],"single":[70],"\\emph{aggregate}":[71],"reward.":[72,106],"In":[73],"paper,":[75],"we":[76,133,167],"consider":[77],"far":[79],"restrictive":[81],"``fully":[82],"bandit''":[83],"model":[85,152],"for":[86,115,144,175],"episodic":[87,116],"MDPs,":[88],"does":[92],"not":[93],"even":[94],"observe":[95],"pairs":[99],"--":[100],"it":[101],"learns":[103],"aggregate":[105],"reward.":[106],"We":[107,137],"provide":[108],"first":[110],"efficient":[111],"bandit":[112],"learning":[113,197],"algorithm":[114,174,198],"MDPs":[117],"$\\widetilde{O}(\\sqrt{T})$":[119],"regret.":[120],"Our":[121],"regret":[122,142],"an":[124],"exponential":[125],"dependence":[126],"on":[127],"horizon":[129],"length":[130],"$\\H$,":[131],"which":[132],"show":
[134],"necessary.":[136],"also":[138],"obtain":[139],"improved":[140],"bounds":[143],"``ordered''":[145],"MDPs;":[146],"these":[147],"used":[150],"to":[151,192],"classical":[153],"stochastic":[154],"optimization":[155],"problems":[156],"$k$-item":[159,179],"prophet":[160,180],"inequality":[161],"sequential":[163],"posted":[164],"pricing.":[165],"Finally,":[166],"evaluate":[168],"empirical":[170],"performance":[171,189],"of":[172,178,194],"our":[173,187],"setting":[177],"inequalities;":[181],"despite":[182],"highly":[184],"algorithm's":[188],"comparable":[191],"state-of-art":[196],"(UCB-VI)":[199],"feedback.":[203]},"counts_by_year":[],"updated_date":"2026-04-04T16:13:02.066488","created_date":"2026-02-04T00:00:00"}
