{"id":"https://openalex.org/W7140980343","doi":"https://doi.org/10.48550/arxiv.2603.24730","title":"A Framework for Generating Semantically Ambiguous Images to Probe Human and Machine Perception","display_name":"A Framework for Generating Semantically Ambiguous Images to Probe Human and Machine Perception","publication_year":2026,"publication_date":"2026-03-25","ids":{"openalex":"https://openalex.org/W7140980343","doi":"https://doi.org/10.48550/arxiv.2603.24730"},"language":null,"primary_location":{"id":"doi:10.48550/arxiv.2603.24730","is_oa":true,"landing_page_url":"https://doi.org/10.48550/arxiv.2603.24730","pdf_url":null,"source":{"id":"https://openalex.org/S4306400194","display_name":"arXiv (Cornell University)","issn_l":null,"issn":null,"is_oa":true,"is_in_doaj":false,"is_core":false,"host_organization":"https://openalex.org/I205783295","host_organization_name":"Cornell University","host_organization_lineage":["https://openalex.org/I205783295"],"host_organization_lineage_names":[],"type":"repository"},"license":null,"license_id":null,"version":null,"is_accepted":false,"is_published":false,"raw_source_name":null,"raw_type":"article"},"type":"preprint","indexed_in":["datacite"],"open_access":{"is_oa":true,"oa_status":"green","oa_url":"https://doi.org/10.48550/arxiv.2603.24730","any_repository_has_fulltext":true},"authorships":[{"author_position":"first","author":{"id":"https://openalex.org/A5130740693","display_name":"Yuqi Hu","orcid":null},"institutions":[],"countries":[],"is_corresponding":true,"raw_author_name":"Hu, Yuqi","raw_affiliation_strings":[],"affiliations":[]},{"author_position":"middle","author":{"id":"https://openalex.org/A5015532578","display_name":"Vasha DuTell","orcid":"https://orcid.org/0000-0001-8625-1350"},"institutions":[],"countries":[],"is_corresponding":false,"raw_author_name":"DuTell, Vasha","raw_affiliation_strings":[],"affiliations":[]},{"author_position":"middle","author":{"id":"https://openalex.org/A5027178725","display_name":"Ahna R. Girshick","orcid":null},"institutions":[],"countries":[],"is_corresponding":false,"raw_author_name":"Girshick, Ahna R.","raw_affiliation_strings":[],"affiliations":[]},{"author_position":"last","author":{"id":"https://openalex.org/A5024180947","display_name":"Jennifer E. 
Corbett","orcid":"https://orcid.org/0000-0002-9412-7963"},"institutions":[],"countries":[],"is_corresponding":false,"raw_author_name":"Corbett, Jennifer E.","raw_affiliation_strings":[],"affiliations":[]}],"institutions":[],"countries_distinct_count":0,"institutions_distinct_count":4,"corresponding_author_ids":["https://openalex.org/A5130740693"],"corresponding_institution_ids":[],"apc_list":null,"apc_paid":null,"fwci":null,"has_fulltext":false,"cited_by_count":0,"citation_normalized_percentile":null,"cited_by_percentile_year":null,"biblio":{"volume":null,"issue":null,"first_page":null,"last_page":null},"is_retracted":false,"is_paratext":false,"is_xpac":false,"primary_topic":{"id":"https://openalex.org/T11094","display_name":"Face Recognition and Perception","score":0.932200014591217,"subfield":{"id":"https://openalex.org/subfields/2805","display_name":"Cognitive Neuroscience"},"field":{"id":"https://openalex.org/fields/28","display_name":"Neuroscience"},"domain":{"id":"https://openalex.org/domains/1","display_name":"Life Sciences"}},"topics":[{"id":"https://openalex.org/T11094","display_name":"Face Recognition and Perception","score":0.932200014591217,"subfield":{"id":"https://openalex.org/subfields/2805","display_name":"Cognitive Neuroscience"},"field":{"id":"https://openalex.org/fields/28","display_name":"Neuroscience"},"domain":{"id":"https://openalex.org/domains/1","display_name":"Life Sciences"}},{"id":"https://openalex.org/T11605","display_name":"Visual Attention and Saliency Detection","score":0.019500000402331352,"subfield":{"id":"https://openalex.org/subfields/1707","display_name":"Computer Vision and Pattern Recognition"},"field":{"id":"https://openalex.org/fields/17","display_name":"Computer Science"},"domain":{"id":"https://openalex.org/domains/3","display_name":"Physical Sciences"}},{"id":"https://openalex.org/T12650","display_name":"Aesthetic Perception and Analysis","score":0.007899999618530273,"subfield":{"id":"https://openalex.org/subfields/2805","display_name":"Cognitive Neuroscience"},"field":{"id":"https://openalex.org/fields/28","display_name":"Neuroscience"},"domain":{"id":"https://openalex.org/domains/1","display_name":"Life Sciences"}}],"keywords":[{"id":"https://openalex.org/keywords/interpretability","display_name":"Interpretability","score":0.8770999908447266},{"id":"https://openalex.org/keywords/ambiguity","display_name":"Ambiguity","score":0.6383000016212463},{"id":"https://openalex.org/keywords/embedding","display_name":"Embedding","score":0.5169000029563904},{"id":"https://openalex.org/keywords/image","display_name":"Image (mathematics)","score":0.4586000144481659},{"id":"https://openalex.org/keywords/machine-vision","display_name":"Machine vision","score":0.4092000126838684},{"id":"https://openalex.org/keywords/semantic-gap","display_name":"Semantic gap","score":0.4074999988079071},{"id":"https://openalex.org/keywords/perception","display_name":"Perception","score":0.4066999852657318},{"id":"https://openalex.org/keywords/human-visual-system-model","display_name":"Human visual system model","score":0.39149999618530273},{"id":"https://openalex.org/keywords/semantics","display_name":"Semantics (computer science)","score":0.38089999556541443},{"id":"https://openalex.org/keywords/feature","display_name":"Feature 
(linguistics)","score":0.36890000104904175}],"concepts":[{"id":"https://openalex.org/C2781067378","wikidata":"https://www.wikidata.org/wiki/Q17027399","display_name":"Interpretability","level":2,"score":0.8770999908447266},{"id":"https://openalex.org/C154945302","wikidata":"https://www.wikidata.org/wiki/Q11660","display_name":"Artificial intelligence","level":1,"score":0.7103999853134155},{"id":"https://openalex.org/C41008148","wikidata":"https://www.wikidata.org/wiki/Q21198","display_name":"Computer science","level":0,"score":0.6909000277519226},{"id":"https://openalex.org/C2780522230","wikidata":"https://www.wikidata.org/wiki/Q1140419","display_name":"Ambiguity","level":2,"score":0.6383000016212463},{"id":"https://openalex.org/C41608201","wikidata":"https://www.wikidata.org/wiki/Q980509","display_name":"Embedding","level":2,"score":0.5169000029563904},{"id":"https://openalex.org/C115961682","wikidata":"https://www.wikidata.org/wiki/Q860623","display_name":"Image (mathematics)","level":2,"score":0.4586000144481659},{"id":"https://openalex.org/C5339829","wikidata":"https://www.wikidata.org/wiki/Q1425977","display_name":"Machine vision","level":2,"score":0.4092000126838684},{"id":"https://openalex.org/C86034646","wikidata":"https://www.wikidata.org/wiki/Q474311","display_name":"Semantic gap","level":4,"score":0.4074999988079071},{"id":"https://openalex.org/C26760741","wikidata":"https://www.wikidata.org/wiki/Q160402","display_name":"Perception","level":2,"score":0.4066999852657318},{"id":"https://openalex.org/C119857082","wikidata":"https://www.wikidata.org/wiki/Q2539","display_name":"Machine learning","level":1,"score":0.4027000069618225},{"id":"https://openalex.org/C160086991","wikidata":"https://www.wikidata.org/wiki/Q5939193","display_name":"Human visual system model","level":3,"score":0.39149999618530273},{"id":"https://openalex.org/C184337299","wikidata":"https://www.wikidata.org/wiki/Q1437428","display_name":"Semantics (computer science)","level":2,"score":0.38089999556541443},{"id":"https://openalex.org/C31972630","wikidata":"https://www.wikidata.org/wiki/Q844240","display_name":"Computer vision","level":1,"score":0.37049999833106995},{"id":"https://openalex.org/C2776401178","wikidata":"https://www.wikidata.org/wiki/Q12050496","display_name":"Feature (linguistics)","level":2,"score":0.36890000104904175},{"id":"https://openalex.org/C100776233","wikidata":"https://www.wikidata.org/wiki/Q2532492","display_name":"Bridge (graph theory)","level":2,"score":0.3546000123023987},{"id":"https://openalex.org/C153180895","wikidata":"https://www.wikidata.org/wiki/Q7148389","display_name":"Pattern recognition (psychology)","level":2,"score":0.3321000039577484},{"id":"https://openalex.org/C2778572836","wikidata":"https://www.wikidata.org/wiki/Q380933","display_name":"Space (punctuation)","level":2,"score":0.32690000534057617},{"id":"https://openalex.org/C12267149","wikidata":"https://www.wikidata.org/wiki/Q282453","display_name":"Support vector machine","level":2,"score":0.3149999976158142},{"id":"https://openalex.org/C116834253","wikidata":"https://www.wikidata.org/wiki/Q2039217","display_name":"Identification (biology)","level":2,"score":0.31139999628067017},{"id":"https://openalex.org/C26517878","wikidata":"https://www.wikidata.org/wiki/Q228039","display_name":"Key (lock)","level":2,"score":0.30489999055862427},{"id":"https://openalex.org/C139793654","wikidata":"https://www.wikidata.org/wiki/Q174923","display_name":"Optical 
illusion","level":3,"score":0.30329999327659607},{"id":"https://openalex.org/C2778755073","wikidata":"https://www.wikidata.org/wiki/Q10858537","display_name":"Scale (ratio)","level":2,"score":0.29829999804496765},{"id":"https://openalex.org/C2780009758","wikidata":"https://www.wikidata.org/wiki/Q6804172","display_name":"Measure (data warehouse)","level":2,"score":0.2930000126361847},{"id":"https://openalex.org/C198352243","wikidata":"https://www.wikidata.org/wiki/Q37105","display_name":"Line (geometry)","level":2,"score":0.29159998893737793},{"id":"https://openalex.org/C204321447","wikidata":"https://www.wikidata.org/wiki/Q30642","display_name":"Natural language processing","level":1,"score":0.29120001196861267},{"id":"https://openalex.org/C184047640","wikidata":"https://www.wikidata.org/wiki/Q182593","display_name":"Illusion","level":2,"score":0.28940001130104065},{"id":"https://openalex.org/C178253425","wikidata":"https://www.wikidata.org/wiki/Q162668","display_name":"Visual perception","level":3,"score":0.2847000062465668},{"id":"https://openalex.org/C146047270","wikidata":"https://www.wikidata.org/wiki/Q469666","display_name":"Human\u2013machine system","level":2,"score":0.27869999408721924},{"id":"https://openalex.org/C167966045","wikidata":"https://www.wikidata.org/wiki/Q5532625","display_name":"Generative model","level":3,"score":0.2782000005245209},{"id":"https://openalex.org/C39890363","wikidata":"https://www.wikidata.org/wiki/Q36108","display_name":"Generative grammar","level":2,"score":0.271699994802475},{"id":"https://openalex.org/C75294576","wikidata":"https://www.wikidata.org/wiki/Q5165192","display_name":"Contextual image classification","level":3,"score":0.2696000039577484},{"id":"https://openalex.org/C33676613","wikidata":"https://www.wikidata.org/wiki/Q13415176","display_name":"Dimension (graph theory)","level":2,"score":0.26840001344680786},{"id":"https://openalex.org/C126780896","wikidata":"https://www.wikidata.org/wiki/Q899871","display_name":"Distortion (music)","level":4,"score":0.2540000081062317}],"mesh":[],"locations_count":1,"locations":[{"id":"doi:10.48550/arxiv.2603.24730","is_oa":true,"landing_page_url":"https://doi.org/10.48550/arxiv.2603.24730","pdf_url":null,"source":{"id":"https://openalex.org/S4306400194","display_name":"arXiv (Cornell University)","issn_l":null,"issn":null,"is_oa":true,"is_in_doaj":false,"is_core":false,"host_organization":"https://openalex.org/I205783295","host_organization_name":"Cornell University","host_organization_lineage":["https://openalex.org/I205783295"],"host_organization_lineage_names":[],"type":"repository"},"license":null,"license_id":null,"version":null,"is_accepted":false,"is_published":null,"raw_source_name":null,"raw_type":"article"}],"best_oa_location":{"id":"doi:10.48550/arxiv.2603.24730","is_oa":true,"landing_page_url":"https://doi.org/10.48550/arxiv.2603.24730","pdf_url":null,"source":{"id":"https://openalex.org/S4306400194","display_name":"arXiv (Cornell University)","issn_l":null,"issn":null,"is_oa":true,"is_in_doaj":false,"is_core":false,"host_organization":"https://openalex.org/I205783295","host_organization_name":"Cornell University","host_organization_lineage":["https://openalex.org/I205783295"],"host_organization_lineage_names":[],"type":"repository"},"license":null,"license_id":null,"version":null,"is_accepted":false,"is_published":false,"raw_source_name":null,"raw_type":"article"},"sustainable_development_goals":[{"display_name":"Reduced 
inequalities","score":0.5808561444282532,"id":"https://metadata.un.org/sdg/10"}],"awards":[],"funders":[],"has_content":{"pdf":false,"grobid_xml":false},"content_urls":null,"referenced_works_count":0,"referenced_works":[],"related_works":[],"abstract_inverted_index":{"The":[0],"classic":[1],"duck-rabbit":[2],"illusion":[3],"reveals":[4],"that":[5,65,102],"when":[6],"visual":[7],"evidence":[8],"is":[9],"ambiguous,":[10],"the":[11,26,39,56,70,117,124,151],"human":[12,23,130,154],"brain":[13],"must":[14],"decide":[15],"what":[16],"it":[17,37],"sees.":[18],"But":[19],"where":[20,86],"exactly":[21],"do":[22,33],"observers":[24],"draw":[25,36],"line":[27],"between":[28,58,67,153],"''duck''":[29],"and":[30,32,87,90,123,159,171],"''rabbit'',":[31,110],"machine":[34,91,103,135],"classifiers":[35,92,104],"in":[38,69],"same":[40],"place?":[41],"We":[42,60],"use":[43],"semantically":[44],"ambiguous":[45,79],"images":[46],"as":[47,145],"interpretability":[48],"probes":[49],"to":[50,74,83,128,149],"expose":[51],"how":[52,88,140],"vision":[53],"models":[54],"represent":[55],"boundaries":[57],"concepts.":[59],"present":[61],"a":[62,146],"psychophysically-informed":[63],"framework":[64,138],"interpolates":[66],"concepts":[68],"CLIP":[71,118],"embedding":[72,119],"space":[73],"generate":[75],"continuous":[76],"spectra":[77],"of":[78],"images,":[80],"allowing":[81],"us":[82],"precisely":[84],"measure":[85],"humans":[89,112],"place":[93],"their":[94],"semantic":[95],"boundaries.":[96],"Using":[97],"this":[98],"framework,":[99],"we":[100],"show":[101],"are":[105,113],"more":[106,114,132],"biased":[107],"towards":[108],"seeing":[109],"whereas":[111],"aligned":[115],"with":[116],"used":[120],"for":[121],"synthesis,":[122],"guidance":[125],"scale":[126],"seems":[127],"affect":[129],"sensitivity":[131],"strongly":[133],"than":[134],"classifiers.":[136],"Our":[137],"demonstrates":[139],"controlled":[141],"ambiguity":[142],"can":[143],"serve":[144],"diagnostic":[147],"tool":[148],"bridge":[150],"gap":[152],"psychophysical":[155],"analysis,":[156],"image":[157,161,172],"classification,":[158],"generative":[160],"models,":[162],"offering":[163],"insight":[164],"into":[165],"human-model":[166],"alignment,":[167],"robustness,":[168],"model":[169],"interpretability,":[170],"synthesis":[173],"methods.":[174]},"counts_by_year":[],"updated_date":"2026-03-28T06:16:51.555046","created_date":"2026-03-28T00:00:00"}
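Editor's note, not part of the record: OpenAlex stores abstracts as an "abstract_inverted_index" mapping each token to the list of word positions where it occurs, so the plain-text abstract has to be reconstructed. A minimal Python sketch of that decoding follows; the helper name decode_inverted_abstract is my own, the work ID is taken from the record above, and api.openalex.org/works/{id} is the standard OpenAlex REST endpoint for fetching a single work.

```python
import json
import urllib.request


def decode_inverted_abstract(inverted_index):
    """Rebuild the abstract: place every token at each of its recorded
    positions, then join the tokens in position order."""
    positions = [
        (pos, token)
        for token, pos_list in inverted_index.items()
        for pos in pos_list
    ]
    return " ".join(token for _, token in sorted(positions))


# Fetch the record above by its OpenAlex work ID and decode the abstract.
url = "https://api.openalex.org/works/W7140980343"
with urllib.request.urlopen(url) as resp:
    work = json.load(resp)

print(decode_inverted_abstract(work["abstract_inverted_index"]))
# Starts: "The classic duck-rabbit illusion reveals that when visual
# evidence is ambiguous, the human brain must decide what it sees. ..."
```

Sorting (position, token) pairs handles tokens that occur at several positions (e.g. "the" appears at positions 11, 26, 39, ...), since each occurrence contributes its own pair.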
