#
# Example config file for ht://Dig.
#
# This configuration file is used by all the programs that make up ht://Dig.
# Please refer to the attribute reference manual for more details on what
# can be put into this file. (http://www.htdig.org/confindex.html)
# Note that most attributes have very reasonable default values, so you
# really only have to add attributes here if you want to change the defaults.
#
# What follows are some of the common attributes you might want to change.
#

#
# Specify where the database files need to go. Make sure that there is
# plenty of free disk space available for the databases. They can get
# pretty big.
#
database_dir:        /var/spool/htdig

#
# This specifies the URL where the robot (htdig) will start. You can specify
# multiple URLs here; just separate them by some whitespace.
# The example here will cause the pages on the local web server to be
# indexed.
# You could also index all the URLs listed in a file like so:
#
#    start_url:       `${common_dir}/start.url`
#
start_url:           http://localhost/

#
# This attribute limits the scope of the indexing process. The default is to
# set it to the same as the start_url above. This way only pages that are on
# the sites specified in the start_url attribute will be indexed, and any
# URLs that go outside of those sites will be rejected.
#
# Keep in mind that the value for this attribute is just a list of string
# patterns. As long as a URL contains at least one of the patterns, it will
# be seen as part of the scope of the index.
#
limit_urls_to:       ${start_url}

#
# If there are particular pages that you definitely do NOT want to index, you
# can use the exclude_urls attribute. The value is a list of string patterns.
# If a URL matches any of the patterns, it will NOT be indexed. This is
# useful to exclude things like virtual web trees or database accesses. By
# default, all CGI URLs will be excluded. (Note that the /cgi-bin/ convention
# may not work on your web server. Check the path prefix used on your web
# server.)
#
exclude_urls:        /cgi-bin/ .cgi

#
# Since ht://Dig does not (and cannot) parse every document type, this
# attribute is a list of strings (extensions) that will be ignored during
# indexing. These are *only* checked at the end of a URL, whereas
# exclude_urls patterns are matched anywhere.
#
bad_extensions:      .wav .gz .z .sit .au .zip .tar .hqx .exe .com .gif \
                     .jpg .jpeg .aiff .class .map .ram .tgz .bin .rpm .mpg .mov .avi
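#
# For illustration only: the URLs below are hypothetical and are not part of
# this configuration. With the exclude_urls and bad_extensions values above,
# a run of htdig would be expected to treat them roughly like this:
#
#    http://localhost/cgi-bin/search.cgi    rejected ("/cgi-bin/" matches anywhere in the URL)
#    http://localhost/man/view.cgi?page=1   rejected (".cgi" matches anywhere in the URL)
#    http://localhost/pics/logo.gif         rejected (".gif" matches the end of the URL)
#    http://localhost/logo.gif/index.html   indexed  (".gif" is not at the end of the URL,
#                                           so bad_extensions does not apply)
#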
#
# The string htdig will send in every request to identify the robot. Change
# this to your email address.
#
maintainer:          unconfigured@htdig.searchengine.maintainer

#
# The excerpts that are displayed in long results rely on stored information
# in the index databases. The compiled default only stores 512 characters of
# text from each document (this excludes any HTML markup...) If you plan on
# using the excerpts you probably want to make this larger. The only concern
# here is that more disk space is going to be needed to store the additional
# information. Since disk space is cheap (! :-)) you might want to set this
# to a value so that a large percentage of the documents that you are going
# to be indexing are stored completely in the database. At SDSU we found
# that by setting this value to about 50k the index would get 97% of all
# documents completely and only 3% were cut off at 50k. You probably want to
# experiment with this value.
# Note that if you want to set this value low, you probably want to set the
# excerpt_show_top attribute to false so that the top excerpt_length characters
# of the document are always shown.
#
max_head_length:     10000

#
# To limit network connections, ht://Dig will only pull up to a certain limit
# of bytes. This prevents the indexing from dying because the server keeps
# sending information. However, a frequent FAQ arises because people have
# files bigger than the default limit of 100KB, so this example sets the
# limit a bit higher. (See <http://www.htdig.org/FAQ.html> for more.)
#
max_doc_size:        200000

#
# Most people expect some sort of excerpt in results. By defaul