# ACM Robots file

# advertising-related bots:
User-agent: Mediapartners-Google*
Disallow: /

# Wikipedia work bots:
User-agent: IsraBot
Disallow: /

User-agent: Orthogaffe
Disallow: /

# Crawlers that are kind enough to obey, but which we'd rather not have
# unless they're feeding search engines.
User-agent: UbiCrawler
Disallow: /

User-agent: DOC
Disallow: /

User-agent: Zao
Disallow: /

User-agent: dotbot
Disallow: /

User-agent: DotBot
Disallow: /

# Some bots are known to be trouble, particularly those designed to copy
# entire sites. Please obey robots.txt.
User-agent: sitecheck.internetseer.com
Disallow: /

User-agent: Zealbot
Disallow: /

User-agent: MSIECrawler
Disallow: /

User-agent: SiteSnagger
Disallow: /

User-agent: WebStripper
Disallow: /

User-agent: WebCopier
Disallow: /

User-agent: Fetch
Disallow: /

User-agent: Offline Explorer
Disallow: /

User-agent: Teleport
Disallow: /

User-agent: TeleportPro
Disallow: /

User-agent: WebZIP
Disallow: /

User-agent: linko
Disallow: /

User-agent: HTTrack
Disallow: /

User-agent: Microsoft.URL.Control
Disallow: /

User-agent: Xenu
Disallow: /

User-agent: larbin
Disallow: /

User-agent: libwww
Disallow: /

User-agent: ZyBORG
Disallow: /

User-agent: Download Ninja
Disallow: /

# Sorry, wget in its recursive mode is a frequent problem.
# Please read the man page and use it properly; there is a
# --wait option you can use to set the delay between hits,
# for instance.
User-agent: wget
Disallow: /

# The 'grub' distributed client has been *very* poorly behaved.
User-agent: grub-client
Disallow: /

# Doesn't follow robots.txt anyway, but...
User-agent: k2spider
Disallow: /

# Hits many times per second, not acceptable
# http://www.nameprotect.com/botinfo.html
User-agent: NPBot
Disallow: /

# A capture bot, downloads gazillions of pages with no public benefit
# http://www.webreaper.net/
User-agent: WebReaper
Disallow: /

# GOOGLE - Mobile
User-agent: Googlebot
Disallow: /m.cacm.acm.org/browse-by-subject/

# GOOGLE
User-agent: Googlebot
Disallow: /browse-by-subject/

# GOOGLE
User-agent: Googlebot
Disallow: /*/comments

# Yahoo Search
User-agent: Slurp
Crawl-delay: 40
Disallow: /browse-by-subject/

# MSN LiveSearch
User-agent: msnbot
Disallow: /browse-by-subject/

# ASK.COM Search
User-agent: Teoma
Disallow: /browse-by-subject/

# ProQuest Crawler Search
User-agent: Magnolia
Disallow: /browse-by-subject/

# ACM GSA 1; usually gsa1-crawler-acm
User-agent: crawleracm1
Disallow: /browse-by-subject/

# ACM GSA 2
User-agent: gsa2-crawler-acm
Disallow: /browse-by-subject/

# Don't allow indexing by any other bots
# NOTE: this should be "Disallow: /", but the GSAs apparently get tripped up by this.
User-agent: *
Disallow: /browse-by-subject/

#User-agent: *
#Crawl-delay: 60

# NOTE: path values must begin with "/"; a bare "*" is not a valid
# Disallow value, so this was corrected to "/" (block everything),
# matching every other full-site block in this file.
User-agent: Grabber
Disallow: /