{"id":"https://openalex.org/W4415003635","doi":"https://doi.org/10.1109/acit65614.2025.11185889","title":"Reinforcement Learning Control of Markov Chains Based on Stochastic Gradient Descent","display_name":"Reinforcement Learning Control of Markov Chains Based on Stochastic Gradient Descent","publication_year":2025,"publication_date":"2025-09-17","ids":{"openalex":"https://openalex.org/W4415003635","doi":"https://doi.org/10.1109/acit65614.2025.11185889"},"language":"en","primary_location":{"id":"doi:10.1109/acit65614.2025.11185889","is_oa":false,"landing_page_url":"https://doi.org/10.1109/acit65614.2025.11185889","pdf_url":null,"source":null,"license":null,"license_id":null,"version":"publishedVersion","is_accepted":true,"is_published":true,"raw_source_name":"2025 15th International Conference on Advanced Computer Information Technologies (ACIT)","raw_type":"proceedings-article"},"type":"article","indexed_in":["crossref"],"open_access":{"is_oa":false,"oa_status":"closed","oa_url":null,"any_repository_has_fulltext":false},"authorships":[{"author_position":"first","author":{"id":"https://openalex.org/A5071298796","display_name":"Leonid Lyubchyk","orcid":"https://orcid.org/0000-0003-0237-8915"},"institutions":[{"id":"https://openalex.org/I67256668","display_name":"National Technical University \"Kharkiv Polytechnic Institute\"","ror":"https://ror.org/00yp5c433","country_code":"UA","type":"education","lineage":["https://openalex.org/I67256668"]}],"countries":["UA"],"is_corresponding":true,"raw_author_name":"Leonid Lyubchyk","raw_affiliation_strings":["National Technical University \"Kharkiv Polytechnic Institute\",Department of Computer Mathematics and Data Analysis,Kharkiv,Ukraine"],"affiliations":[{"raw_affiliation_string":"National Technical University \"Kharkiv Polytechnic Institute\",Department of Computer Mathematics and Data Analysis,Kharkiv,Ukraine","institution_ids":["https://openalex.org/I67256668"]}]},{"author_position":"middle","author":{"id":"https://openalex.org/A5085249197","display_name":"Olena Akhiiezer","orcid":"https://orcid.org/0000-0002-7087-9749"},"institutions":[{"id":"https://openalex.org/I67256668","display_name":"National Technical University \"Kharkiv Polytechnic Institute\"","ror":"https://ror.org/00yp5c433","country_code":"UA","type":"education","lineage":["https://openalex.org/I67256668"]}],"countries":["UA"],"is_corresponding":false,"raw_author_name":"Olena Akhiiezer","raw_affiliation_strings":["National Technical University \"Kharkiv Polytechnic Institute\",Department of Computer Mathematics and Data Analysis,Kharkiv,Ukraine"],"affiliations":[{"raw_affiliation_string":"National Technical University \"Kharkiv Polytechnic Institute\",Department of Computer Mathematics and Data Analysis,Kharkiv,Ukraine","institution_ids":["https://openalex.org/I67256668"]}]},{"author_position":"last","author":{"id":"https://openalex.org/A5091188897","display_name":"Nataliia Protsai","orcid":"https://orcid.org/0000-0002-2057-3231"},"institutions":[{"id":"https://openalex.org/I67256668","display_name":"National Technical University \"Kharkiv Polytechnic Institute\"","ror":"https://ror.org/00yp5c433","country_code":"UA","type":"education","lineage":["https://openalex.org/I67256668"]}],"countries":["UA"],"is_corresponding":false,"raw_author_name":"Nataliia Protsai","raw_affiliation_strings":["National Technical University \"Kharkiv Polytechnic Institute\",Department of Computer Mathematics and Data Analysis,Kharkiv,Ukraine"],"affiliations":[{"raw_affiliation_string":"National Technical University \"Kharkiv Polytechnic Institute\",Department of Computer Mathematics and Data Analysis,Kharkiv,Ukraine","institution_ids":["https://openalex.org/I67256668"]}]}],"institutions":[],"countries_distinct_count":1,"institutions_distinct_count":3,"corresponding_author_ids":["https://openalex.org/A5071298796"],"corresponding_institution_ids":["https://openalex.org/I67256668"],"apc_list":null,"apc_paid":null,"fwci":0.0,"has_fulltext":false,"cited_by_count":0,"citation_normalized_percentile":{"value":0.15464145,"is_in_top_1_percent":false,"is_in_top_10_percent":false},"cited_by_percentile_year":null,"biblio":{"volume":null,"issue":null,"first_page":"66","last_page":"69"},"is_retracted":false,"is_paratext":false,"is_xpac":false,"primary_topic":{"id":"https://openalex.org/T12676","display_name":"Machine Learning and ELM","score":0.8355000019073486,"subfield":{"id":"https://openalex.org/subfields/1702","display_name":"Artificial Intelligence"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}},"topics":[{"id":"https://openalex.org/T12676","display_name":"Machine Learning and ELM","score":0.8355000019073486,"subfield":{"id":"https://openalex.org/subfields/1702","display_name":"Artificial Intelligence"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}}],"keywords":[{"id":"https://openalex.org/keywords/markov-chain","display_name":"Markov chain","score":0.7210999727249146},{"id":"https://openalex.org/keywords/reinforcement-learning","display_name":"Reinforcement learning","score":0.6643999814987183},{"id":"https://openalex.org/keywords/gradient-descent","display_name":"Gradient descent","score":0.6291999816894531},{"id":"https://openalex.org/keywords/markov-decision-process","display_name":"Markov decision process","score":0.4984000027179718},{"id":"https://openalex.org/keywords/markov-process","display_name":"Markov process","score":0.47870001196861267},{"id":"https://openalex.org/keywords/set","display_name":"Set (abstract data type)","score":0.4779999852180481},{"id":"https://openalex.org/keywords/stochastic-control","display_name":"Stochastic control","score":0.39259999990463257},{"id":"https://openalex.org/keywords/sensitivity","display_name":"Sensitivity (control systems)","score":0.3921999931335449},{"id":"https://openalex.org/keywords/stochastic-gradient-descent","display_name":"Stochastic gradient descent","score":0.3837999999523163}],"concepts":[{"id":"https://openalex.org/C98763669","wikidata":"https://www.wikidata.org/wiki/Q176645","display_name":"Markov chain","level":2,"score":0.7210999727249146},{"id":"https://openalex.org/C97541855","wikidata":"https://www.wikidata.org/wiki/Q830687","display_name":"Reinforcement learning","level":2,"score":0.6643999814987183},{"id":"https://openalex.org/C153258448","wikidata":"https://www.wikidata.org/wiki/Q1199743","display_name":"Gradient descent","level":3,"score":0.6291999816894531},{"id":"https://openalex.org/C33923547","wikidata":"https://www.wikidata.org/wiki/Q395","display_name":"Mathematics","level":0,"score":0.5238000154495239},{"id":"https://openalex.org/C106189395","wikidata":"https://www.wikidata.org/wiki/Q176789","display_name":"Markov decision process","level":3,"score":0.4984000027179718},{"id":"https://openalex.org/C159886148","wikidata":"https://www.wikidata.org/wiki/Q176645","display_name":"Markov process","level":2,"score":0.47870001196861267},{"id":"https://openalex.org/C177264268","wikidata":"https://www.wikidata.org/wiki/Q1514741","display_name":"Set (abstract data type)","level":2,"score":0.4779999852180481},{"id":"https://openalex.org/C126255220","wikidata":"https://www.wikidata.org/wiki/Q141495","display_name":"Mathematical optimization","level":1,"score":0.4603999853134155},{"id":"https://openalex.org/C170131372","wikidata":"https://www.wikidata.org/wiki/Q7617811","display_name":"Stochastic control","level":3,"score":0.39259999990463257},{"id":"https://openalex.org/C21200559","wikidata":"https://www.wikidata.org/wiki/Q7451068","display_name":"Sensitivity (control systems)","level":2,"score":0.3921999931335449},{"id":"https://openalex.org/C206688291","wikidata":"https://www.wikidata.org/wiki/Q7617819","display_name":"Stochastic gradient descent","level":3,"score":0.3837999999523163},{"id":"https://openalex.org/C41008148","wikidata":"https://www.wikidata.org/wiki/Q21198","display_name":"Computer science","level":0,"score":0.37869998812675476},{"id":"https://openalex.org/C2775924081","wikidata":"https://www.wikidata.org/wiki/Q55608371","display_name":"Control (management)","level":2,"score":0.36899998784065247},{"id":"https://openalex.org/C54907487","wikidata":"https://www.wikidata.org/wiki/Q7915688","display_name":"Variable-order Markov model","level":4,"score":0.36010000109672546},{"id":"https://openalex.org/C48103436","wikidata":"https://www.wikidata.org/wiki/Q599031","display_name":"State (computer science)","level":2,"score":0.34299999475479126},{"id":"https://openalex.org/C189973286","wikidata":"https://www.wikidata.org/wiki/Q176695","display_name":"Markov property","level":4,"score":0.3427000045776367},{"id":"https://openalex.org/C91575142","wikidata":"https://www.wikidata.org/wiki/Q1971426","display_name":"Optimal control","level":2,"score":0.3400999903678894},{"id":"https://openalex.org/C115680565","wikidata":"https://www.wikidata.org/wiki/Q5977448","display_name":"Gradient method","level":2,"score":0.3206000030040741},{"id":"https://openalex.org/C49555168","wikidata":"https://www.wikidata.org/wiki/Q176583","display_name":"Stochastic matrix","level":3,"score":0.3073999881744385},{"id":"https://openalex.org/C163836022","wikidata":"https://www.wikidata.org/wiki/Q6771326","display_name":"Markov model","level":3,"score":0.3052000105381012},{"id":"https://openalex.org/C188116033","wikidata":"https://www.wikidata.org/wiki/Q2664563","display_name":"Q-learning","level":3,"score":0.29760000109672546},{"id":"https://openalex.org/C8272713","wikidata":"https://www.wikidata.org/wiki/Q176737","display_name":"Stochastic process","level":2,"score":0.2921999990940094},{"id":"https://openalex.org/C55479107","wikidata":"https://www.wikidata.org/wiki/Q97663916","display_name":"Stochastic approximation","level":3,"score":0.288100004196167},{"id":"https://openalex.org/C17098449","wikidata":"https://www.wikidata.org/wiki/Q176814","display_name":"Partially observable Markov decision process","level":4,"score":0.26840001344680786},{"id":"https://openalex.org/C47446073","wikidata":"https://www.wikidata.org/wiki/Q5165890","display_name":"Control theory (sociology)","level":3,"score":0.265500009059906}],"mesh":[],"locations_count":1,"locations":[{"id":"doi:10.1109/acit65614.2025.11185889","is_oa":false,"landing_page_url":"https://doi.org/10.1109/acit65614.2025.11185889","pdf_url":null,"source":null,"license":null,"license_id":null,"version":"publishedVersion","is_accepted":true,"is_published":true,"raw_source_name":"2025 15th International Conference on Advanced Computer Information Technologies (ACIT)","raw_type":"proceedings-article"}],"best_oa_location":null,"sustainable_development_goals":[],"awards":[],"funders":[],"has_content":{"grobid_xml":false,"pdf":false},"content_urls":null,"referenced_works_count":0,"referenced_works":[],"related_works":[],"abstract_inverted_index":{"The":[0],"control":[1,48,79,110,119],"problem":[2],"for":[3,25,98,118],"a":[4,29,60,77,85],"Markov":[5,142],"chain":[6],"with":[7],"random":[8,44],"rewards":[9,45,138],"under":[10],"uncertainty":[11],"seeks":[12],"to":[13],"maximize":[14],"the":[15,22,39,99,102,106,109,134,140],"average":[16,66],"total":[17],"reward.":[18],"It":[19],"assumes":[20],"that":[21,37,55,74],"transition":[23,63],"matrices":[24,64],"state":[26],"probabilities,":[27],"given":[28],"fixed":[30],"set":[31,61],"of":[32,41,59,62,101,108],"controls,":[33],"are":[34,126],"unknown,":[35],"and":[36,43,65,137],"only":[38],"sequences":[40],"states":[42,136],"resulting":[46],"from":[47],"decisions":[49],"can":[50],"be":[51],"observed.":[52],"Unlike":[53],"methods":[54],"require":[56],"prior":[57],"estimation":[58],"expected":[67],"rewards,":[68],"this":[69],"work":[70],"takes":[71],"an":[72,96],"approach":[73],"directly":[75],"constructs":[76],"randomized":[78],"policy":[80],"through":[81],"reinforcement":[82],"learning":[83,116],"using":[84],"stochastic":[86,123],"gradient":[87,100,124],"descent":[88],"procedure.":[89],"By":[90],"utilizing":[91],"sensitivity":[92],"functions,":[93],"it":[94],"derives":[95],"expression":[97],"optimality":[103],"criterion":[104],"concerning":[105],"elements":[107],"choice":[111],"probability":[112],"matrix.":[113],"Recurrent":[114],"projected":[115],"algorithms":[117],"policy,":[120],"based":[121],"on":[122,130],"descent,":[125],"proposed,":[127],"relying":[128],"solely":[129],"current":[131],"information":[132],"about":[133],"observable":[135],"within":[139],"controlled":[141],"chain.":[143]},"counts_by_year":[],"updated_date":"2026-03-07T16:01:11.037858","created_date":"2025-10-10T00:00:00"}
