{"id":"https://openalex.org/W1543104516","doi":"https://doi.org/10.1109/apsitt.2015.7217104","title":"Acquisition of cooperative behaviour among heterogeneous agents using step-up reinforcement learning","display_name":"Acquisition of cooperative behaviour among heterogeneous agents using step-up reinforcement learning","publication_year":2015,"publication_date":"2015-08-01","ids":{"openalex":"https://openalex.org/W1543104516","doi":"https://doi.org/10.1109/apsitt.2015.7217104","mag":"1543104516"},"language":"en","primary_location":{"id":"doi:10.1109/apsitt.2015.7217104","is_oa":false,"landing_page_url":"https://doi.org/10.1109/apsitt.2015.7217104","pdf_url":null,"source":null,"license":null,"license_id":null,"version":"publishedVersion","is_accepted":true,"is_published":true,"raw_source_name":"2015 10th Asia-Pacific Symposium on Information and Telecommunication Technologies (APSITT)","raw_type":"proceedings-article"},"type":"article","indexed_in":["crossref"],"open_access":{"is_oa":false,"oa_status":"closed","oa_url":null,"any_repository_has_fulltext":false},"authorships":[{"author_position":"first","author":{"id":"https://openalex.org/A5043769542","display_name":"Wataru Sato","orcid":"https://orcid.org/0000-0002-5335-1272"},"institutions":[{"id":"https://openalex.org/I116465919","display_name":"Kogakuin University","ror":"https://ror.org/01wc2tq75","country_code":"JP","type":"education","lineage":["https://openalex.org/I116465919"]}],"countries":["JP"],"is_corresponding":true,"raw_author_name":"Wataru Sato","raw_affiliation_strings":["Department of Informatics, Kogakuin University, Tokyo, Japan","Department of Informatics Graduate School of Engineering Kogakuin University Shinjuku-ku, Tokyo, Japan"],"affiliations":[{"raw_affiliation_string":"Department of Informatics, Kogakuin University, Tokyo, Japan","institution_ids":["https://openalex.org/I116465919"]},{"raw_affiliation_string":"Department of Informatics Graduate School of Engineering Kogakuin University Shinjuku-ku, Tokyo, Japan","institution_ids":["https://openalex.org/I116465919"]}]},{"author_position":"last","author":{"id":"https://openalex.org/A5112639721","display_name":"Kanta Tachibana","orcid":"https://orcid.org/0000-0002-8675-7842"},"institutions":[{"id":"https://openalex.org/I116465919","display_name":"Kogakuin University","ror":"https://ror.org/01wc2tq75","country_code":"JP","type":"education","lineage":["https://openalex.org/I116465919"]}],"countries":["JP"],"is_corresponding":false,"raw_author_name":"Kanta Tachibana","raw_affiliation_strings":["Faculty of Informatics, Kogakuin University, Tokyo, Japan","Faculty of Informatics Kogakuin University Shinjuku-ku, Tokyo, Japan"],"affiliations":[{"raw_affiliation_string":"Faculty of Informatics, Kogakuin University, Tokyo, Japan","institution_ids":["https://openalex.org/I116465919"]},{"raw_affiliation_string":"Faculty of Informatics Kogakuin University Shinjuku-ku, Tokyo, Japan","institution_ids":["https://openalex.org/I116465919"]}]}],"institutions":[],"countries_distinct_count":1,"institutions_distinct_count":2,"corresponding_author_ids":["https://openalex.org/A5043769542"],"corresponding_institution_ids":["https://openalex.org/I116465919"],"apc_list":null,"apc_paid":null,"fwci":0.0,"has_fulltext":false,"cited_by_count":2,"citation_normalized_percentile":{"value":0.01874703,"is_in_top_1_percent":false,"is_in_top_10_percent":false},"cited_by_percentile_year":{"min":89,"max":94},"biblio":{"volume":"38","issue":null,"first_page":"1","last_page":"3"},"is_retracted":false,"is_paratext":false,"is_xpac":false,"primary_topic":{"id":"https://openalex.org/T10462","display_name":"Reinforcement Learning in Robotics","score":0.9975000023841858,"subfield":{"id":"https://openalex.org/subfields/1702","display_name":"Artificial Intelligence"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}},"topics":[{"id":"https://openalex.org/T10462","display_name":"Reinforcement Learning in Robotics","score":0.9975000023841858,"subfield":{"id":"https://openalex.org/subfields/1702","display_name":"Artificial Intelligence"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}},{"id":"https://openalex.org/T10456","display_name":"Multi-Agent Systems and Negotiation","score":0.9962000250816345,"subfield":{"id":"https://openalex.org/subfields/1702","display_name":"Artificial Intelligence"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}},{"id":"https://openalex.org/T11975","display_name":"Evolutionary Algorithms and Applications","score":0.9746999740600586,"subfield":{"id":"https://openalex.org/subfields/1702","display_name":"Artificial Intelligence"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}}],"keywords":[{"id":"https://openalex.org/keywords/reinforcement-learning","display_name":"Reinforcement learning","score":0.7997304201126099},{"id":"https://openalex.org/keywords/computer-science","display_name":"Computer science","score":0.7771506309509277},{"id":"https://openalex.org/keywords/artificial-intelligence","display_name":"Artificial intelligence","score":0.4622132182121277},{"id":"https://openalex.org/keywords/error-driven-learning","display_name":"Error-driven learning","score":0.4580466151237488},{"id":"https://openalex.org/keywords/multi-agent-system","display_name":"Multi-agent system","score":0.45628273487091064},{"id":"https://openalex.org/keywords/human\u2013computer-interaction","display_name":"Human\u2013computer interaction","score":0.38628089427948},{"id":"https://openalex.org/keywords/machine-learning","display_name":"Machine learning","score":0.3849853277206421}],"concepts":[{"id":"https://openalex.org/C97541855","wikidata":"https://www.wikidata.org/wiki/Q830687","display_name":"Reinforcement learning","level":2,"score":0.7997304201126099},{"id":"https://openalex.org/C41008148","wikidata":"https://www.wikidata.org/wiki/Q21198","display_name":"Computer science","level":0,"score":0.7771506309509277},{"id":"https://openalex.org/C154945302","wikidata":"https://www.wikidata.org/wiki/Q11660","display_name":"Artificial intelligence","level":1,"score":0.4622132182121277},{"id":"https://openalex.org/C47932503","wikidata":"https://www.wikidata.org/wiki/Q5395689","display_name":"Error-driven learning","level":3,"score":0.4580466151237488},{"id":"https://openalex.org/C41550386","wikidata":"https://www.wikidata.org/wiki/Q529909","display_name":"Multi-agent system","level":2,"score":0.45628273487091064},{"id":"https://openalex.org/C107457646","wikidata":"https://www.wikidata.org/wiki/Q207434","display_name":"Human\u2013computer interaction","level":1,"score":0.38628089427948},{"id":"https://openalex.org/C119857082","wikidata":"https://www.wikidata.org/wiki/Q2539","display_name":"Machine learning","level":1,"score":0.3849853277206421}],"mesh":[],"locations_count":1,"locations":[{"id":"doi:10.1109/apsitt.2015.7217104","is_oa":false,"landing_page_url":"https://doi.org/10.1109/apsitt.2015.7217104","pdf_url":null,"source":null,"license":null,"license_id":null,"version":"publishedVersion","is_accepted":true,"is_published":true,"raw_source_name":"2015 10th Asia-Pacific Symposium on Information and Telecommunication Technologies (APSITT)","raw_type":"proceedings-article"}],"best_oa_location":null,"sustainable_development_goals":[],"awards":[],"funders":[],"has_content":{"pdf":false,"grobid_xml":false},"content_urls":null,"referenced_works_count":4,"referenced_works":["https://openalex.org/W2029733418","https://openalex.org/W2327142188","https://openalex.org/W2781750485","https://openalex.org/W6746497802"],"related_works":["https://openalex.org/W2371091044","https://openalex.org/W2171010636","https://openalex.org/W87513465","https://openalex.org/W2391666574","https://openalex.org/W2786230833","https://openalex.org/W3203256658","https://openalex.org/W2352650970","https://openalex.org/W1544514152","https://openalex.org/W1493952344","https://openalex.org/W4312372616"],"abstract_inverted_index":{"This":[0],"paper":[1],"discusses":[2],"acquisition":[3],"of":[4,75,81],"cooperative":[5,16],"behaviour":[6],"among":[7],"heterogeneous":[8,50],"agents":[9,36,82,94,99],"and":[10,20],"proposes":[11],"two":[12,79],"methods":[13],"to":[14,69,84,97],"promote":[15],"behaviour:":[17],"phased":[18,32],"learning":[19,44],"selective":[21,55],"recognition.":[22],"For":[23,49],"complicated":[24],"scenarios":[25],"such":[26],"as":[27],"multi-agent":[28,51],"tasks,":[29,52],"we":[30,53,87],"propose":[31,54],"learning,":[33],"in":[34,39,45,57,77],"which":[35,58,78],"first":[37],"learn":[38],"a":[40,62],"simpler":[41],"environment":[42],"before":[43],"the":[46],"target":[47],"environment.":[48],"recognition,":[56],"an":[59],"agent":[60],"recognizes":[61],"partner,":[63],"with":[64,103,106],"whom":[65,107],"it":[66],"can":[67],"cooperate":[68,102],"earn":[70],"rewards,":[71],"selectively.":[72],"By":[73],"means":[74],"simulations":[76],"types":[80],"cooperated":[83],"capture":[85],"prey,":[86],"verified":[88],"that,":[89],"using":[90],"our":[91],"proposed":[92],"methods,":[93],"are":[95],"able":[96],"differentiate":[98],"they":[100,108],"should":[101,109],"from":[104],"those":[105],"not.":[110]},"counts_by_year":[{"year":2022,"cited_by_count":1},{"year":2020,"cited_by_count":1}],"updated_date":"2025-11-06T03:46:38.306776","created_date":"2025-10-10T00:00:00"}
