X-Git-Url: http://gb7djk.dxcluster.net/gitweb/gitweb.cgi?a=blobdiff_plain;f=perl%2FWCY.pm;h=7f19ccfd1263c007fb2157d44ec55a1dd4d2c9a5;hb=f0910da57e166acb22e83de4e4b771d175074c80;hp=20b6a184274538fff31238586b6253911e4d46d6;hpb=261c75481017f32ca491df475b36e9600ca430a1;p=spider.git

diff --git a/perl/WCY.pm b/perl/WCY.pm
index 20b6a184..7f19ccfd 100644
--- a/perl/WCY.pm
+++ b/perl/WCY.pm
@@ -20,7 +20,7 @@ use Data::Dumper;
 use strict;
 use vars qw($date $sfi $k $expk $a $r $sa $gmf $au @allowed @denied $fp $node $from
 			$dirprefix $param
-			%dup $duplth $dupage);
+			$duplth $dupage $filterdef);
 
 $fp = 0;						# the DXLog fcb
 $date = 0;						# the unix time of the WWV (notional)
@@ -35,13 +35,26 @@ $node = "";						# originating node
 $from = "";						# who this came from
 @allowed = ();					# if present only these callsigns are regarded as valid WWV updators
 @denied = ();					# if present ignore any wwv from these callsigns
-%dup = ();						# the spot duplicates hash
 $duplth = 20;					# the length of text to use in the deduping
 $dupage = 12*3600;				# the length of time to hold spot dups
 
 $dirprefix = "$main::data/wcy";
 $param = "$dirprefix/param";
 
+$filterdef = bless ([
+			# tag, sort, field, priv, special parser
+			['by', 'c', 11],
+			['origin', 'c', 12],
+			['channel', 'n', 13],
+			['by_dxcc', 'n', 14],
+			['by_itu', 'n', 15],
+			['by_zone', 'n', 16],
+			['origin_dxcc', 'c', 17],
+			['origin_itu', 'c', 18],
+			['origin_itu', 'c', 19],
+		   ], 'Filter::Cmd');
+
+
 sub init
 {
 	$fp = DXLog::new('wcy', 'dat', 'm');
@@ -137,12 +150,13 @@ sub search
 {
 	my $from = shift;
 	my $to = shift;
-	my @date = $fp->unixtoj(shift);
+	my $date = $fp->unixtoj(shift);
 	my $pattern = shift;
 	my $search;
 	my @out;
 	my $eval;
 	my $count;
+	my $i;
 
 	$search = 1;
 	$eval = qq(
@@ -160,9 +174,8 @@
 			  );
 
 	$fp->close;									# close any open files
-
-	my $fh = $fp->open(@date);
-	for ($count = 0; $count < $to; ) {
+	my $fh = $fp->open($date);
+	for ($i = $count = 0; $count < $to; $i++ ) {
 		my @in = ();
 		if ($fh) {
 			while (<$fh>) {
@@ -196,7 +209,7 @@
 	my $d = cldate($r->[0]);
 	my $t = (gmtime($r->[0]))[2];
 
-	return sprintf("$d %02d %5d %3d %3d %3d %3d %-5s %-5s %-3s <%s>",
+	return sprintf("$d %02d %5d %3d %3d %3d %3d %-5s %-5s %6s <%s>",
 		  $t, @$r[1..9]);
 }
 
@@ -205,8 +218,8 @@
 #
 sub readfile
 {
-	my @date = $fp->unixtoj(shift);
-	my $fh = $fp->open(@date);
+	my $date = $fp->unixtoj(shift);
+	my $fh = $fp->open($date);
 	my @spots = ();
 	my @in;
 
@@ -227,34 +240,13 @@
 
 	# dump if too old
 	return 2 if $d < $main::systime - $dupage;
-#	chomp $text;
-#	$text = substr($text, 0, $duplth) if length $text > $duplth;
-	my $dupkey = "$d|$sfi|$k|$a|$r";
-	return 1 if exists $dup{$dupkey};
-	$dup{$dupkey} = $d;			# in seconds (to the nearest minute)
-	return 0;
-}
-
-# called every hour and cleans out the dup cache
-sub process
-{
-	my $cutoff = $main::systime - $dupage;
-	while (my ($key, $val) = each %dup) {
-		delete $dup{$key} if $val < $cutoff;
-	}
+	my $dupkey = "C$d|$sfi|$k|$a|$r";
+	return DXDupe::check($dupkey, $main::systime+$dupage);
 }
 
 sub listdups
 {
-	my $regex = shift;
-	$regex = '.*' unless $regex;
-	$regex =~ s/[\$\@\%]//g;
-	my @out;
-	for (sort { $dup{$a} <=> $dup{$b} } grep { m{$regex}i } keys %dup) {
-		my $val = $dup{$_};
-		push @out, "$_ = " . cldatetime($val);
-	}
-	return @out;
+	return DXDupe::listdups('C', $dupage, @_);
 }
 1;
 __END__
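
Note on the dedupe change: the hunks above drop WCY's private %dup hash (and the hourly sweep in sub process) in favour of the shared DXDupe cache, with WCY entries keyed by a leading 'C' ("C$d|$sfi|$k|$a|$r"). The sketch below is not the repository's DXDupe.pm; it is a minimal stand-in written only to illustrate the contract the new WCY code appears to rely on, under the assumption that DXDupe::check($key, $expiry) returns true for a key it has already recorded (a duplicate) and otherwise remembers the key until $expiry, and that DXDupe::listdups($prefix, $dupage, ...) lists the live entries for one prefix. The package name and in-memory store are invented for illustration.

package DXDupeSketch;

use strict;
use warnings;

my %dup;				# key => expiry time (unix seconds), assumed storage

# return 1 if $key is already cached and still live (a duplicate),
# otherwise remember it until $expires and return 0 - this mirrors how
# the new WCY::dup() uses the return value directly
sub check
{
	my ($key, $expires) = @_;
	my $now = time;
	return 1 if exists $dup{$key} && $dup{$key} >= $now;
	$dup{$key} = $expires;
	return 0;
}

# list cached keys starting with $prefix, oldest expiry first; any further
# arguments (e.g. a pattern) are ignored in this sketch
sub listdups
{
	my ($prefix, $dupage, @rest) = @_;
	return map  { "$_ = " . scalar gmtime($dup{$_}) }
	       sort { $dup{$a} <=> $dup{$b} }
	       grep { /^\Q$prefix/ } keys %dup;
}

1;

With a shared cache keyed this way, one expiry mechanism can serve every module that calls DXDupe, instead of each module running its own hourly sub process sweep like the code removed above.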