#!/afs/athena.mit.edu/contrib/perl5/bin/perl -w
#
##!/usr/bin/perl -w
#
# sitescooper - download news from web sites and convert it automatically
#	 into one of several formats suitable for viewing on a Palm
#	 handheld.
#
# Skip down to read the POD documentation, or search for "=head1".
#
# Sitescooper is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.  See the COPYRIGHT section in the POD
# documentation below, or the "doc/gpl.html" file in the distribution,
# for further details.

#---------------------------------------------------------------------------

# Mac users -- you can pass in command-line arguments, such as "-isilo"
# or "-refresh", by changing this line. These will be prepended to the
# names of any documents you drag and drop onto sitescooper.
#
# NOTE(review): this package variable lives in the Scoop namespace, so it
# is presumably read by Scoop.pm (likely only on MacOS) -- TODO confirm.
#
$Scoop::MAC_ARGS = "";

# the default configuration has now moved to the "sitescooper.cf" file,
# in the directory you installed sitescooper. On UNIX platforms this
# will be copied to your home directory so you can also edit it there.

#---------------------------------------------------------------------------

# Print the usage/help text and terminate via die().  The here-doc
# interpolates $Scoop::VERSION, which is not defined in this file and is
# presumably set by Scoop.pm -- TODO confirm usage() is only reachable
# after "use Scoop" has run.
sub usage { die <<__ENDOFUSAGE;

Sitescooper - download news from web sites and convert it automatically
	into one of several formats suitable for viewing on a Palm
	handheld.

sitescooper [options] [ [-site sitename] ...]

sitescooper [options] [-sites sitename ...]

sitescooper [options] [-name nm] [-levels n] [-storyurl regexp]
	[-set sitefileparam value] url [...]

Options: [-debug] [-refresh] [-config file]
	[-install dir] [-instapp app] [-dump] [-dumpprc] [-nowrite]
	[-nodates] [-quiet] [-admin cmd] [-nolinkrewrite] [-stdout-to file]
	[-keep-tmps] [-noheaders] [-nofooters] [-fromcache]
	[-filename template] [-prctitle template] [-parallel] [-disc]
	[-limit numkbytes] [-maxlinks numlinks] [-maxstories numstories]
	[-text | -html | -mhtml | -doc | -isilo | -misilo | -richreader
	| -pipe fmt command]

file:// and http:// URLs are supported.

Version: $Scoop::VERSION
__ENDOFUSAGE
								    }

# Route perl warnings and fatal errors through logging handlers.
# NOTE(review): these are string handler names, which perl resolves in
# package main when the signal fires; no warn_log/die_log subs are
# defined in this file, so they are presumably installed by Scoop.pm
# -- TODO confirm, otherwise warnings would die with "undefined
# subroutine" at signal time.
$SIG{__WARN__} = 'warn_log';
$SIG{__DIE__} = 'die_log';

use FindBin;

# Add the per-platform library directories (relative to the script's own
# location) to @INC before "use Scoop" below is compiled.
#
# NOTE(review): `use lib` takes effect at COMPILE time, so every `use lib`
# line in all three branches below executes on every platform, regardless
# of which runtime branch $^O selects.  The net effect is that all the
# platform variants of these paths get prepended to @INC everywhere; the
# runtime if/elsif structure only governs the (mostly dead) non-`use`
# statements.
BEGIN {
  # sadly, we can't rely on File::Spec to do this for us. It's not
  # included with some versions of MacPerl.
  #
  if ($^O eq 'MacOS') {
    # NOTE(review): $_ is assigned and trimmed here but never used
    # afterwards -- looks like leftover code.
    $_ = $FindBin::Bin; s/:$//;
    use lib $FindBin::Bin . ":lib:";
    use lib $FindBin::Bin . "::lib:sitescooper:lib:";
    use lib $FindBin::Bin . "::share:sitescooper:lib:";

  # NOTE(review): $^O on Windows is 'MSWin32', which this case-sensitive
  # pattern does not match ('win' vs 'Win').  Harmless only because the
  # `use lib` lines already ran at compile time (see note above).
  } elsif ($^O =~ /(win|os2)/) {
    use lib $FindBin::Bin . "\\lib";
    use lib $FindBin::Bin . "\\..\\lib\\sitescooper\\lib";
    use lib $FindBin::Bin . "\\..\\share\\sitescooper\\lib";

  } else {
    use lib $FindBin::Bin . "/lib";
    use lib $FindBin::Bin . "/../lib/sitescooper/lib";
    use lib $FindBin::Bin . "/../share/sitescooper/lib";
  }
}

# CUSTOMISE: you may need to add 'use "your_lib_dir";' here
BEGIN {
  # Give the pilot-locker shared perl library directory top priority in
  # the module search path.
  unshift @INC, '/afs/sipb.mit.edu/project/pilot-locker/share/perllib';
}
use Scoop;
#use "/afs/sipb.mit.edu/project/pilot-locker/share/perllib/Scoop.pm";

# Main driver sequence -- all the real work is implemented in Scoop.pm.
&Scoop::Init;       # initialise internal state before anything else
&Scoop::ParseArgs;  # process command-line options (see usage() above)
&Scoop::ReadConfig; # load sitescooper.cf or the file given via -config
&Scoop::ReadSites;  # load the .site files describing what to scoop
&Scoop::Run;        # download and convert the sites
&Scoop::Finish;     # final conversion/installation steps
&cleanexit;         # restore %SIG handlers and exit with status 0

# ---------------------------------------------------------------------------

# Report a fatal error on stderr, then exit with status 2 via cleanexit()
# (which also restores the default warn/die handlers).
sub scoop_die {
  my @message = @_;
  print STDERR @message;
  cleanexit(2);
}

# Emit a warning message verbatim on stderr.  Does not go through perl's
# warn(), so the $SIG{__WARN__} handler is deliberately bypassed.
sub scoop_warn {
  my @message = @_;
  print STDERR @message;
}

# Print a debug message on stderr, prefixed with "debug: " and terminated
# with a newline.
sub dbg {
  my @message = ('debug: ', @_, "\n");
  print STDERR @message;
}

# Issue a warning about a specific site file.  Goes through warn(), so
# the installed $SIG{__WARN__} handler (if any) sees the message.
sub sitewarn {
  my $site_file = shift;
  my $message = join('', @_);
  warn "Site \"$site_file\": $message\n";
}
# Print a progress message on stderr, terminated with a newline.
sub verbose {
  my @message = (@_, "\n");
  print STDERR @message;
}

# Restore default warn/die handling and terminate the process.
#
# The optional first argument is the exit status (default 0).
#
# BUGFIX: the original used `exit @_;` -- exit evaluates its expression
# in scalar context, so @_ yielded the NUMBER of arguments, not the
# requested status.  cleanexit(2) therefore exited with status 1.  Use
# the first argument explicitly instead; a bare &cleanexit still exits 0.
sub cleanexit {
  $SIG{__WARN__} = '';
  $SIG{__DIE__} = '';
  my $exit_code = shift;
  $exit_code = 0 unless defined $exit_code;
  exit $exit_code;
}

# ---------------------------------------------------------------------------

=head1 NAME

sitescooper - download news from web sites and convert it automatically
into one of several formats suitable for viewing on a Palm handheld.

=head1 SYNOPSIS

sitescooper [options] [ [-site sitename] ...]

sitescooper [options] [-sites sitename ...]

sitescooper [options] [-name nm] [-levels n] [-storyurl regexp]
	[-set sitefileparam value] url [...]

Options: [-debug] [-refresh] [-config file]
[-install dir] [-instapp app] [-dump] [-dumpprc] [-nowrite] [-nodates]
[-quiet] [-admin cmd] [-nolinkrewrite] [-stdout-to file]
[-keep-tmps] [-noheaders] [-nofooters] [-fromcache]
[-filename template] [-prctitle template] [-parallel] [-disc]
[-limit numkbytes] [-maxlinks numlinks] [-maxstories numstories]
[-text | -html | -mhtml | -doc | -isilo | -misilo | -richreader
| -pipe fmt command]

=head1 DESCRIPTION

This script, in conjunction with its configuration file and its set of
B<site> files, will download news stories from several top news sites into
text format and/or onto your Palm handheld (with the aid of the
B<makedoc>/B<MakeDocW> or B<iSilo> utilities).

Alternatively URLs can be supplied on the command line, in which case
those URLs will be downloaded and converted using a reasonable set of
default settings.

HTTP and local files, using the C<file:///> protocol, are both supported.

Multiple types of sites are supported:

=over 4

1-level sites, where the text to be converted is all present on one page
(such as Slashdot, Linux Weekly News, BluesNews, NTKnow, Ars Technica);

2-level sites, where the text to be converted is linked to from a Table
of Contents page (such as Wired News, BBC News, and I, Cringely);

3-level sites, where the text to be converted is linked to from a Table
of Contents page, which in turn is linked to from a list of issues
page (such as PalmPower).

=back

In addition sites that post news as items on one big page, such as
Slashdot, Ars Technica, and BluesNews, are supported using diff.

Note that at this moment in time, the URLs-on-the-command-line invocation
format does not support 2- or 3-level sites.

The script is portable to most UNIX variants that support perl, as well
as the Win32 platform (tested with ActivePerl 5.00502 build 509).

The default configuration is stored in the F<sitescooper.cf> file in the
installation directory, but an alternative configuration file can be
specified with the B<-config> switch.

The sites downloaded will be the ones listed in the site files you keep in
your F<sites> directory.

sitescooper maintains a cache in its temporary directory; files are kept
in this cache for a week at most. Ditto for the text output directory
(set with B<TextSaveDir> in the built-in configuration).

If a password is required for the site, and the current sitescooper session
is interactive, the user will be prompted for the username and password.
This authentication token will be saved for later use.  This way a site
that requires login can be set up as a .site -- just log in once, and your
password is saved for future non-interactive runs.

Note however that the encryption used to hide the password in the
sitescooper configuration is pretty transparent; I recommend that rather
than using your own username and password to log in to passworded sites, a
dedicated sitescooper account be used instead.

=head1 OPTIONS

=over 4

=item -refresh

Refresh all links -- ignore the F<already_seen> file, do not diff pages,
and always fetch links, even if they are available in the cache.

=item -config file

Read the configuration from B<file> instead of using the built-in one.

=item -limit numkbytes

Set the limit for output file size to B<numkbytes> kilobytes, instead of
the default 200K.

=item -maxlinks numlinks

Stop retrieving web pages after B<numlinks> have been traversed. This is
not used to specify how "deep" a site should be scooped -- it is the
number of links followed in total.

=item -maxstories numstories

Stop retrieving web pages after B<numstories> stories have been retrieved.

=item -install dir

The directory to save PRC files to once they've been converted, in order
to have them installed to your Palm handheld.

=item -instapp app

The application to run to install PRC files onto your Palm, once they've
been converted.

=item -site sitename

Limit the run to the site named in the B<sitename> argument.  Normally all
available sites will be downloaded. To limit the run to 2 or more sites,
provide multiple B<-site> arguments like so:

	-site ntk.site -site tbtf.site

=item -sites sitename [...]

Limit the run to multiple sites; an easier way to specify multiple sites
than using the -site argument for each file.

=item -name name

When specifying a URL on the command-line, this provides the name that
should be used when installing the site to the Pilot. It acts exactly
the same way as the Name: field in a site file.

=item -levels n

When specifying a URL on the command-line, this indicates how many levels
a site has. Not needed when using .site files.

=item -storyurl regexp

When specifying a URL on the command-line, this indicates the regular
expression which links to stories should conform to. Not needed when using
.site files.

=item -doc

Convert the page(s) downloaded into DOC format, with all the articles
listed in full, one after the other.

=item -text

Convert the page(s) downloaded into plain text format, with all the
articles listed in full, one after the other.

=item -html

Convert the page(s) downloaded into HTML format, on one big page, with
a table of contents (taken from the site if possible), followed by all
the articles one after another.

=item -mhtml

Convert the page(s) downloaded into HTML format, but retain the
multiple-page format. This will create the output in a directory
called B<site_name>; in conjunction with the B<-dump> argument,
it will output the path of this directory on standard output before
exiting.

=item -isilo

Convert the page(s) downloaded into iSilo format (see
http://www.isilo.com/ ), on one big page.  This is the default.  The
page(s) will be displayed with a table of contents (taken from the site if
possible), followed by all the articles one after another.

=item -misilo

Convert the page(s) downloaded into iSilo format (see
http://www.isilo.com/ ), with one iSilo document per site, with each story
on a separate page.  The iSilo document will have a table-of-contents
page, taken from the site if possible, with each article on a separate
page.

=item -richreader

Convert the page(s) downloaded into RichReader format using HTML2Doc.exe
(see http://users.erols.com/arenakm/palm/RichReader.html ).  The page(s)
will be displayed with a table of contents (taken from the site if
possible), followed by all the articles one after another.

=item -pipe fmt command

Convert the page(s) downloaded into an arbitrary format, using the command
provided. Sitescooper will still rewrite the page(s) according to the
B<fmt> argument, which should be one of:

=over 4

=item text

Plain text format.

=item html

HTML in one big page.

=item mhtml

HTML in multiple pages.

=back

The command argument can contain C<__SCOOPFILE__>, which will be replaced
with the filename of the file containing the rewritten pages in the above
format, C<__SYNCFILE__>, which will be replaced with a suitable filename
in the Palm synchronization folder, and C<__TITLE__>, which will be
replaced by the title of the file (generally a string containing the date
and site name).

Note that for the B<-mhtml> switch, C<__SCOOPFILE__> will be replaced
with the name of the file containing the table-of-contents page. It's up
to the conversion utility to follow the href links to the other files
in that directory.

=item -dump

Output the page(s) downloaded directly to stdout in text or HTML format,
instead of writing them to files and converting each one. This option
implies B<-text>; to dump HTML, use B<-dump -html>.

=item -dumpprc

Output the page(s) downloaded directly to stdout, in converted format as a
PRC file, suitable for installation to a Palm handheld.

=item -nowrite

Test mode -- do not write to the cache or already_seen file, instead write
what would be written normally to a directory called new_cache and a
new_already_seen file. This is very handy when writing a new site file.

=item -debug

Enable debugging output. This output is in addition to the usual progress
messages.

=item -quiet

Process sites quietly, without printing the usual progress messages to
STDERR. Warnings about incorrect site files and system errors will still
be output, however.

=item -admin cmd

Perform an administrative command. This is intended to ease the task of
writing scripts which use sitescooper output.  The following admin
commands are available:

=over 4

=item dump-sites

List the sites which would be scooped on a scooping run, and their URLs.
Instead of scooping any sites, sitescooper will exit after performing this
task.  The format is one site per line, with the site file name first, a
tab, the site's URL, a tab, the site name, a tab, and the output filename
that would be generated without path or extension. For example:

S<foobar.site	http://www.foobar.com/	Foo Bar	1999_01_01_Foo_Bar>

=item journal

Write a journal with dumps of the documents as they pass through the
formatting and stripping steps of the scooping process. This is
written to a file called B<journal> in the sitescooper temporary
directory.

=item import-cookies file

Import a Netscape B<cookies> file into sitescooper, so that certain sites
which require them, can use them. For example, the site
B<economist_full.site> requires this. Here's how to import cookies on a
UNIX machine:

S<sitescooper.pl -admin import-cookies ~/.netscape/cookies>

and on Windows:

S<perl sitescooper.pl -admin import-cookies
  "C:\Program Files\Netscape\Users\Default\cookies.txt">

Unfortunately, MS Internet Explorer cookies are currently unsupported.
If you wish to write a patch to support them, that'd be great.

=back

=item -nolinkrewrite

Do not rewrite links on scooped documents -- leave them exactly as they
are.

=item -noheaders

Do not attach the sitescooper header (URL, site name, and navigation
links) to each page.

=item -nofooters

Do not attach the sitescooper footer ("copyright retained by original
authors" blurb) to each page.

=item -fromcache

Do not perform any network access, retrieve everything from the cache or
the shared cache.

=item -filename template

Change the format of output filenames. B<template> contains the following
keyword strings, which are substituted as follows:

=over 4

=item YYYY

The current year, in 4-digit format.

=item MM

The current month number (from 01 to 12), in 2-digit format.

=item Mon

The current month name (from Jan to Dec), in 3-letter format.

=item DD

The current day of the month (from 01 to 31), in 2-digit format.

=item Day

The current day of the week (from Sun to Sat), in 3-letter format.

=item hh

The current hour (from 00 to 23), in 2-digit format.

=item mm

The current minute (from 00 to 59), in 2-digit format.

=item Site

The current site's name.

=item Section

The section of the current site (now obsolete).

=back

The default filename template is B<YYYY_MM_DD_Site>.

=item -prctitle template

Change the format of the titles of the resulting PRC files. B<template>
may contain the same keyword strings as B<-filename>.

The default PRC title template is B<YYYY-Mon-DD: Site>.

=item -nodates

Do not put the date in the installable file's filename. This allows you to
automatically overwrite old files with new ones when you HotSync. It's
a compatibility shortcut for B<-filename Site -prctitle "Site">.

=item -parallel

Use the LWP::Parallel perl module, if available, to preload certain
pages before the single-threaded sitescooper engine gets to them.
This can speed up the scooping of several sites at once, but is
more prone to crashes as the LWP::Parallel code is not as resilient
as the traditional LWP code. This is off by default.

=item -disc

Disconnect a PPP connection once the scooping has finished.  Currently this
code is experimental, and will probably only work on Macintoshes.  This is off
by default.

=item -stdout-to file

Redirect the output of sitescooper into the named file. This is needed on
Windows NT and 95, where certain combinations of perl and Windows do not
seem to support the E<gt> operator.

=item -keep-tmps

Keep temporary files after conversion. Normally the .txt or .html
rendition of a site is deleted after conversion; this option keeps
it around.

=back

=head1 INSTALLATION

To install, edit the script and change the #! line. You may also need to
(a) change the Pilot install dir if you plan to use the pilot installation
functionality, and (b) edit the other parameters marked with CUSTOMISE in
case they need to be customised for your site. They should be set to
acceptable defaults (unless I forgot to comment out the proxy server lines
I use ;).

=head1 EXAMPLES

	sitescooper.pl http://www.ntk.net/

To snarf the ever-cutting NTKnow newsletter.

	sitescooper.pl -refresh -html http://www.ntk.net/

To snarf NTKnow, ignoring any previously-read text, and producing HTML
output.

=head1 ENVIRONMENT

B<sitescooper> makes use of the C<$http_proxy> environment variable, if it
is set.

=head1 AUTHOR

Justin Mason E<lt>jm /at/ jmason.orgE<gt>

=head1 COPYRIGHT

Copyright (C) 1999-2000 Justin Mason

This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your option)
any later version.

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc., 59
Temple Place - Suite 330, Boston, MA  02111-1307, USA, or read it on
the web at http://www.gnu.org/copyleft/gpl.html .

=head1 SCRIPT CATEGORIES

The CPAN script category for this script is C<Web>. See
http://www.cpan.org/scripts/ .

=head1 PREREQUISITES

C<File::Find>
C<File::Copy>
C<File::Path>
C<FindBin>
C<Carp>
C<Cwd>
C<URI::URL>
C<LWP::UserAgent>
C<HTTP::Request::Common>
C<HTTP::Date>
C<HTML::Entities>

All these can be picked up from CPAN at http://www.cpan.org/ .  Note that
C<HTML::Entities> is actually included in one of the previous packages, so
you do not need to install it separately.

=head1 COREQUISITES

C<LWP::Parallel> will be used if available.  C<Win32::TieRegistry>, if
running on a Win32 platform, to find the Pilot Desktop software's
installation directory. C<Algorithm::Diff> to support diffing sites
without running an external B<diff> application (this is required on Mac
systems).

=head1 README

Sitescooper downloads news stories from the web and converts them to Palm
handheld iSilo, DOC or text format for later reading on-the-move.  Site
files and full documentation can be found at
http://sitescooper.cx/ .

=cut

#---------------------------------------------------------------------------

