{"id":"https://openalex.org/W4385188895","doi":"https://doi.org/10.1145/3583133.3590610","title":"Evolution of an Internal Reward Function for Reinforcement Learning","display_name":"Evolution of an Internal Reward Function for Reinforcement Learning","publication_year":2023,"publication_date":"2023-07-15","ids":{"openalex":"https://openalex.org/W4385188895","doi":"https://doi.org/10.1145/3583133.3590610"},"language":"en","primary_location":{"id":"doi:10.1145/3583133.3590610","is_oa":false,"landing_page_url":"http://dx.doi.org/10.1145/3583133.3590610","pdf_url":null,"source":null,"license":null,"license_id":null,"version":"publishedVersion","is_accepted":true,"is_published":true,"raw_source_name":"Proceedings of the Companion Conference on Genetic and Evolutionary Computation","raw_type":"proceedings-article"},"type":"article","indexed_in":["crossref"],"open_access":{"is_oa":false,"oa_status":"closed","oa_url":null,"any_repository_has_fulltext":false},"authorships":[{"author_position":"first","author":{"id":"https://openalex.org/A5054262764","display_name":"Weiyi Zuo","orcid":"https://orcid.org/0009-0002-1421-3667"},"institutions":[{"id":"https://openalex.org/I4210165038","display_name":"University of Chinese Academy of Sciences","ror":"https://ror.org/05qbk4x57","country_code":"CN","type":"education","lineage":["https://openalex.org/I19820366","https://openalex.org/I4210165038"]}],"countries":["CN"],"is_corresponding":true,"raw_author_name":"Weiyi Zuo","raw_affiliation_strings":["University of Chinese Academy of Sciences, Beijing, China"],"affiliations":[{"raw_affiliation_string":"University of Chinese Academy of Sciences, Beijing, China","institution_ids":["https://openalex.org/I4210165038"]}]},{"author_position":"middle","author":{"id":"https://openalex.org/A5090011209","display_name":"Joachim Winther Pedersen","orcid":"https://orcid.org/0000-0001-7884-9432"},"institutions":[{"id":"https://openalex.org/I83467386","display_name":"IT University of Copenhagen","ror":"https://ror.org/02309jg23","country_code":"DK","type":"education","lineage":["https://openalex.org/I83467386"]}],"countries":["DK"],"is_corresponding":false,"raw_author_name":"Joachim Pedersen","raw_affiliation_strings":["IT University of Copenhagen, Copenhagen, Denmark"],"affiliations":[{"raw_affiliation_string":"IT University of Copenhagen, Copenhagen, Denmark","institution_ids":["https://openalex.org/I83467386"]}]},{"author_position":"last","author":{"id":"https://openalex.org/A5020511097","display_name":"Sebastian Risi","orcid":"https://orcid.org/0000-0003-3607-8400"},"institutions":[{"id":"https://openalex.org/I83467386","display_name":"IT University of Copenhagen","ror":"https://ror.org/02309jg23","country_code":"DK","type":"education","lineage":["https://openalex.org/I83467386"]}],"countries":["DK"],"is_corresponding":false,"raw_author_name":"Sebastian Risi","raw_affiliation_strings":["IT University of Copenhagen, Copenhagen, Denmark"],"affiliations":[{"raw_affiliation_string":"IT University of Copenhagen, Copenhagen, Denmark","institution_ids":["https://openalex.org/I83467386"]}]}],"institutions":[],"countries_distinct_count":2,"institutions_distinct_count":3,"corresponding_author_ids":["https://openalex.org/A5054262764"],"corresponding_institution_ids":["https://openalex.org/I4210165038"],"apc_list":null,"apc_paid":null,"fwci":0.0,"has_fulltext":false,"cited_by_count":0,"citation_normalized_percentile":{"value":0.07754116,"is_in_top_1_percent":false,"is_in_top_10_percent":false},"cited_by_percentile_year":null,"biblio":{"volume":null,"issue":null,"first_page":"351","last_page":"354"},"is_retracted":false,"is_paratext":false,"is_xpac":false,"primary_topic":{"id":"https://openalex.org/T10462","display_name":"Reinforcement Learning in Robotics","score":0.9869999885559082,"subfield":{"id":"https://openalex.org/subfields/1702","display_name":"Artificial Intelligence"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}},"topics":[{"id":"https://openalex.org/T10462","display_name":"Reinforcement Learning in Robotics","score":0.9869999885559082,"subfield":{"id":"https://openalex.org/subfields/1702","display_name":"Artificial Intelligence"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}}],"keywords":[{"id":"https://openalex.org/keywords/reinforcement-learning","display_name":"Reinforcement learning","score":0.9349961280822754},{"id":"https://openalex.org/keywords/computer-science","display_name":"Computer science","score":0.7730668187141418},{"id":"https://openalex.org/keywords/function","display_name":"Function (biology)","score":0.5666719675064087},{"id":"https://openalex.org/keywords/artificial-intelligence","display_name":"Artificial intelligence","score":0.5438163876533508},{"id":"https://openalex.org/keywords/stability","display_name":"Stability (learning theory)","score":0.5389611721038818},{"id":"https://openalex.org/keywords/artificial-neural-network","display_name":"Artificial neural network","score":0.5253673791885376},{"id":"https://openalex.org/keywords/signal","display_name":"SIGNAL (programming language)","score":0.4908183515071869},{"id":"https://openalex.org/keywords/control","display_name":"Control (management)","score":0.4531552791595459},{"id":"https://openalex.org/keywords/robot","display_name":"Robot","score":0.45083391666412354},{"id":"https://openalex.org/keywords/reinforcement","display_name":"Reinforcement","score":0.41847431659698486},{"id":"https://openalex.org/keywords/machine-learning","display_name":"Machine learning","score":0.3059508204460144},{"id":"https://openalex.org/keywords/engineering","display_name":"Engineering","score":0.12277865409851074}],"concepts":[{"id":"https://openalex.org/C97541855","wikidata":"https://www.wikidata.org/wiki/Q830687","display_name":"Reinforcement learning","level":2,"score":0.9349961280822754},{"id":"https://openalex.org/C41008148","wikidata":"https://www.wikidata.org/wiki/Q21198","display_name":"Computer science","level":0,"score":0.7730668187141418},{"id":"https://openalex.org/C14036430","wikidata":"https://www.wikidata.org/wiki/Q3736076","display_name":"Function (biology)","level":2,"score":0.5666719675064087},{"id":"https://openalex.org/C154945302","wikidata":"https://www.wikidata.org/wiki/Q11660","display_name":"Artificial intelligence","level":1,"score":0.5438163876533508},{"id":"https://openalex.org/C112972136","wikidata":"https://www.wikidata.org/wiki/Q7595718","display_name":"Stability (learning theory)","level":2,"score":0.5389611721038818},{"id":"https://openalex.org/C50644808","wikidata":"https://www.wikidata.org/wiki/Q192776","display_name":"Artificial neural network","level":2,"score":0.5253673791885376},{"id":"https://openalex.org/C2779843651","wikidata":"https://www.wikidata.org/wiki/Q7390335","display_name":"SIGNAL (programming language)","level":2,"score":0.4908183515071869},{"id":"https://openalex.org/C2775924081","wikidata":"https://www.wikidata.org/wiki/Q55608371","display_name":"Control (management)","level":2,"score":0.4531552791595459},{"id":"https://openalex.org/C90509273","wikidata":"https://www.wikidata.org/wiki/Q11012","display_name":"Robot","level":2,"score":0.45083391666412354},{"id":"https://openalex.org/C67203356","wikidata":"https://www.wikidata.org/wiki/Q1321905","display_name":"Reinforcement","level":2,"score":0.41847431659698486},{"id":"https://openalex.org/C119857082","wikidata":"https://www.wikidata.org/wiki/Q2539","display_name":"Machine learning","level":1,"score":0.3059508204460144},{"id":"https://openalex.org/C127413603","wikidata":"https://www.wikidata.org/wiki/Q11023","display_name":"Engineering","level":0,"score":0.12277865409851074},{"id":"https://openalex.org/C78458016","wikidata":"https://www.wikidata.org/wiki/Q840400","display_name":"Evolutionary biology","level":1,"score":0.0},{"id":"https://openalex.org/C86803240","wikidata":"https://www.wikidata.org/wiki/Q420","display_name":"Biology","level":0,"score":0.0},{"id":"https://openalex.org/C199360897","wikidata":"https://www.wikidata.org/wiki/Q9143","display_name":"Programming language","level":1,"score":0.0},{"id":"https://openalex.org/C66938386","wikidata":"https://www.wikidata.org/wiki/Q633538","display_name":"Structural engineering","level":1,"score":0.0}],"mesh":[],"locations_count":1,"locations":[{"id":"doi:10.1145/3583133.3590610","is_oa":false,"landing_page_url":"http://dx.doi.org/10.1145/3583133.3590610","pdf_url":null,"source":null,"license":null,"license_id":null,"version":"publishedVersion","is_accepted":true,"is_published":true,"raw_source_name":"Proceedings of the Companion Conference on Genetic and Evolutionary Computation","raw_type":"proceedings-article"}],"best_oa_location":null,"sustainable_development_goals":[],"awards":[],"funders":[],"has_content":{"grobid_xml":false,"pdf":false},"content_urls":null,"referenced_works_count":0,"referenced_works":[],"related_works":["https://openalex.org/W3074294383","https://openalex.org/W4206669594","https://openalex.org/W2961085424","https://openalex.org/W2959276766","https://openalex.org/W4295941380","https://openalex.org/W260766989","https://openalex.org/W3139193008","https://openalex.org/W4306674287","https://openalex.org/W2909304650","https://openalex.org/W4319083788"],"abstract_inverted_index":{"Artificial":[0],"neural":[1],"networks":[2],"(ANNs)":[3],"can":[4,35],"be":[5,42,45,92],"trained":[6],"with":[7,72],"reinforcement":[8],"learning":[9],"(RL)":[10],"in":[11,18,22,37,68,83],"simulation":[12],"to":[13,44,47,49,59,70,100,113,172],"control":[14,142],"robots.":[15],"However,":[16,67],"changes":[17],"the":[19,33,51,54,84,120,125,161,170,174,181],"environment":[20],"resulting":[21],"out-of-distribution":[23],"situations":[24],"put":[25],"learned":[26],"policies":[27],"at":[28],"risk":[29],"of":[30,53,124,160],"failure.":[31],"Since":[32],"world":[34],"change":[36],"unpredictable":[38],"ways,":[39],"it":[40,139],"might":[41,88],"desirable":[43],"able":[46],"continue":[48],"update":[50],"parameters":[52],"ANNs":[55],"even":[56],"after":[57,94],"deployment,":[58],"prevent":[60],"failures":[61],"stemming":[62],"from":[63],"a":[64,74,98,106,110],"distributional":[65],"shift.":[66],"order":[69],"optimize":[71],"RL,":[73],"reward":[75,111,147],"signal":[76,112],"is":[77,80],"needed.":[78],"This":[79],"usually":[81],"provided":[82],"simulated":[85],"environment,":[86],"but":[87],"not":[89],"necessarily":[90],"always":[91],"available":[93],"training.":[95],"We":[96,127,185],"propose":[97],"solution":[99],"this":[101,129],"problem":[102],"that":[103,108,144],"involves":[104],"evolving":[105],"function":[107],"provides":[109],"an":[114],"RL":[115,162,175],"algorithm":[116],"based":[117],"only":[118],"on":[119,140,194],"inputs":[121],"and":[122,137,149,158],"outputs":[123],"policy.":[126],"call":[128],"approach":[130],"Evolved":[131],"Internal":[132],"Reward":[133],"Reinforcement":[134],"Learning":[135],"(EIR-RL)":[136],"test":[138],"various":[141],"tasks":[143],"have":[145],"different":[146],"structures":[148],"difficulty":[150],"levels.":[151],"Our":[152],"method":[153],"shows":[154],"improved":[155],"training":[156],"stability":[157],"speed":[159],"agent":[163,176],"under":[164,177],"standard":[165],"circumstances,":[166],"as":[167,169],"well":[168],"ability":[171],"train":[173],"circumstances":[178],"unseen":[179],"during":[180],"initial":[182],"optimization":[183],"phase.":[184],"discuss":[186],"how":[187],"our":[188],"results":[189],"could":[190],"inform":[191],"future":[192],"studies":[193],"autonomous,":[195],"adapting":[196],"agents.":[197]},"counts_by_year":[],"updated_date":"2025-12-24T23:09:58.560324","created_date":"2025-10-10T00:00:00"}
