X-Git-Url: http://gb7djk.dxcluster.net/gitweb/gitweb.cgi?a=blobdiff_plain;f=perl%2FSpot.pm;h=1e7de69a7b32a5f9dbe8ab2651528143984fb801;hb=261c75481017f32ca491df475b36e9600ca430a1;hp=b8938bb9a174fc24f94fe5d5ed8e03420a522f1d;hpb=d5b4190c36f130852973121042876af3c5642cd7;p=spider.git diff --git a/perl/Spot.pm b/perl/Spot.pm index b8938bb9..1e7de69a 100644 --- a/perl/Spot.pm +++ b/perl/Spot.pm @@ -8,28 +8,30 @@ package Spot; -use FileHandle; +use IO::File; use DXVars; use DXDebug; use DXUtil; use DXLog; use Julian; use Prefix; -use Carp; use strict; -use vars qw($fp $maxspots $defaultspots $maxdays $dirprefix); +use vars qw($fp $maxspots $defaultspots $maxdays $dirprefix %dup $duplth $dupage); $fp = undef; $maxspots = 50; # maximum spots to return $defaultspots = 10; # normal number of spots to return $maxdays = 35; # normal maximum no of days to go back $dirprefix = "spots"; +%dup = (); # the spot duplicates hash +$duplth = 20; # the length of text to use in the deduping +$dupage = 3*3600; # the length of time to hold spot dups sub init { mkdir "$dirprefix", 0777 if !-e "$dirprefix"; - $fp = DXLog::new($dirprefix, "dat", 'd') + $fp = DXLog::new($dirprefix, "dat", 'd'); } sub prefix @@ -41,25 +43,37 @@ sub prefix sub add { my @spot = @_; # $freq, $call, $t, $comment, $spotter = @_ + my @out = @spot[0..4]; # just up to the spotter - # sure that the numeric things are numeric now (saves time later) - $spot[0] = 0 + $spot[0]; - $spot[2] = 0 + $spot[2]; + # normalise frequency + $spot[0] = sprintf "%.f", $spot[0]; - # remove ssid if present on spotter - $spot[4] =~ s/-\d+$//o; + # remove ssids if present on spotter + $out[4] =~ s/-\d+$//o; - # add the 'dxcc' country on the end - my @dxcc = Prefix::extract($spot[1]); - push @spot, (@dxcc > 0 ) ? $dxcc[1]->dxcc() : 0; - - my $buf = join("\^", @spot); + # remove leading and trailing spaces + $spot[3] = unpad($spot[3]); + + # add the 'dxcc' country on the end for both spotted and spotter, then the cluster call + my @dxcc = Prefix::extract($out[1]); + my $spotted_dxcc = (@dxcc > 0 ) ? $dxcc[1]->dxcc() : 0; + my $spotted_itu = (@dxcc > 0 ) ? $dxcc[1]->itu() : 0; + my $spotted_cq = (@dxcc > 0 ) ? $dxcc[1]->cq() : 0; + push @out, $spotted_dxcc; + @dxcc = Prefix::extract($out[4]); + my $spotter_dxcc = (@dxcc > 0 ) ? $dxcc[1]->dxcc() : 0; + my $spotter_itu = (@dxcc > 0 ) ? $dxcc[1]->itu() : 0; + my $spotter_cq = (@dxcc > 0 ) ? $dxcc[1]->cq() : 0; + push @out, $spotter_dxcc; + push @out, $spot[5]; + + my $buf = join("\^", @out); # compare dates to see whether need to open another save file (remember, redefining $fp # automagically closes the output file (if any)). 
- $fp->writeunix($spot[2], $buf); + $fp->writeunix($out[2], $buf); - return $buf; + return (@out, $spotted_itu, $spotted_cq, $spotter_itu, $spotter_cq); } # search the spot database for records based on the field no and an expression @@ -73,7 +87,10 @@ sub add # $f2 = date in unix format # $f3 = comment # $f4 = spotter -# $f5 = dxcc country +# $f5 = spotted dxcc country +# $f6 = spotter dxcc country +# $f7 = origin +# # # In addition you can specify a range of days, this means that it will start searching # from days less than today to days less than today @@ -92,27 +109,18 @@ sub search my $ref; my $i; my $count; - my @today = Julian::unixtoj(time); + my @today = Julian::unixtoj(time()); my @fromdate; my @todate; - - if ($dayfrom > 0) { - @fromdate = Julian::sub(@today, $dayfrom); - } else { - @fromdate = @today; - $dayfrom = 0; - } - if ($dayto > 0) { - @todate = Julian::sub(@fromdate, $dayto); - } else { - @todate = Julian::sub(@fromdate, $maxdays); - } - if ($from || $to) { - $to = $from + $maxspots if $to - $from > $maxspots || $to - $from <= 0; - } else { - $from = 0; - $to = $defaultspots; - } + + $dayfrom = 0 if !$dayfrom; + $dayto = $maxdays if !$dayto; + @fromdate = Julian::sub(@today, $dayfrom); + @todate = Julian::sub(@fromdate, $dayto); + $from = 0 unless $from; + $to = $defaultspots unless $to; + + $to = $from + $maxspots if $to - $from > $maxspots || $to - $from <= 0; $expr =~ s/\$f(\d)/\$ref->[$1]/g; # swap the letter n for the correct field name # $expr =~ s/\$f(\d)/\$spots[$1]/g; # swap the letter n for the correct field name @@ -123,21 +131,20 @@ sub search $eval = qq( my \$c; my \$ref; - for (\$c = \$ #spots; \$c >= 0; \$c--) { + for (\$c = \$#spots; \$c >= 0; \$c--) { \$ref = \$spots[\$c]; if ($expr) { \$count++; next if \$count < \$from; # wait until from push(\@out, \$ref); - last LOOP if \$count >= \$to; # stop after to + last if \$count >= \$to; # stop after to } } ); $fp->close; # close any open files - LOOP: - for ($i = 0; $i < $maxdays; ++$i) { # look thru $maxdays worth of files only + for ($i = $count = 0; $i < $maxdays; ++$i) { # look thru $maxdays worth of files only my @now = Julian::sub(@fromdate, $i); # but you can pick which $maxdays worth last if Julian::cmp(@now, @todate) <= 0; @@ -150,6 +157,7 @@ sub search push @spots, [ split '\^' ]; } eval $eval; # do the search on this file + last if $count >= $to; # stop after to return ("Spot search error", $@) if $@; } } @@ -160,18 +168,21 @@ sub search # format a spot for user output in 'broadcast' mode sub formatb { - my @dx = @_; - my $t = ztime($dx[2]); - return sprintf "DX de %-7.7s%11.1f %-12.12s %-30s %s", "$dx[4]:", $dx[0], $dx[1], $dx[3], $t ; + my $wantgrid = shift; + my $t = ztime($_[2]); + my $ref = DXUser->get_current($_[4]); + my $loc = $ref->qra if $ref && $ref->qra && $wantgrid; + $loc = ' ' . 
substr($ref->qra, 0, 4) if $loc;
+	$loc = "" unless $loc;
+	return sprintf "DX de %-7.7s%11.1f %-12.12s %-30s %s$loc", "$_[4]:", $_[0], $_[1], $_[3], $t ;
 }
 
 # format a spot for user output in list mode
 sub formatl
 {
-	my @dx = @_;
-	my $t = ztime($dx[2]);
-	my $d = cldate($dx[2]);
-	return sprintf "%8.1f %-11s %s %s %-28.28s%7s>", $dx[0], $dx[1], $d, $t, $dx[3], "<$dx[4]" ;
+	my $t = ztime($_[2]);
+	my $d = cldate($_[2]);
+	return sprintf "%8.1f %-11s %s %s %-28.28s%7s>", $_[0], $_[1], $d, $t, $_[3], "<$_[4]" ;
 }
 
 #
@@ -191,4 +202,48 @@ sub readfile
 	}
 	return @spots;
 }
+
+# enter the spot for dup checking and return true if it is already a dup
+sub dup
+{
+	my ($freq, $call, $d, $text) = @_;
+
+	# dump if too old
+	return 2 if $d < $main::systime - $dupage;
+
+	$freq = sprintf "%.1f", $freq; # normalise frequency
+	chomp $text;
+	$text = substr($text, 0, $duplth) if length $text > $duplth;
+	unpad($text);
+	my $dupkey = "$freq|$call|$d|$text";
+	return 1 if exists $dup{$dupkey};
+	$dup{$dupkey} = $d; # in seconds (to the nearest minute)
+	return 0;
+}
+
+# called every hour and cleans out the dup cache
+sub process
+{
+	my $cutoff = $main::systime - $dupage;
+	while (my ($key, $val) = each %dup) {
+		delete $dup{$key} if $val < $cutoff;
+	}
+}
+
+sub listdups
+{
+	my $regex = shift;
+	$regex = '.*' unless $regex;
+	$regex =~ s/[\$\@\%]//g;
+	my @out;
+	for (sort { $dup{$a} <=> $dup{$b} } grep { m{$regex}i } keys %dup) {
+		my $val = $dup{$_};
+		push @out, "$_ = " . cldatetime($val);
+	}
+	return @out;
+}
 1;
+
+
+
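
A minimal usage sketch of the new code paths above, not part of the diff itself. It assumes the normal DXSpider runtime (Spot.pm on @INC, $main::systime and $main::mycall set as usual, Spot::init() and the prefix/user databases already loaded at cluster startup); the callsigns, frequency and comment are made-up examples. The extra sixth argument to Spot::add() is what ends up in the new 'origin' field ($f7).

	use strict;
	use Spot;

	# hypothetical spot, e.g. as decoded from an incoming DX protocol message
	my ($freq, $spotted, $t, $text, $spotter) =
		(14205.0, 'G1TLH', $main::systime, 'CQ CQ test', 'GB7DJK');

	# dup() returns 1 for a repeat seen within $dupage, 2 if the spot is too old to keep
	unless (Spot::dup($freq, $spotted, $t, $text)) {
		# store the spot; the trailing argument becomes the origin node call ($f7)
		my @rec = Spot::add($freq, $spotted, $t, $text, $spotter, $main::mycall);

		# broadcast format; the leading 1 asks formatb() to append the spotter's locator
		print Spot::formatb(1, @rec), "\n";
	}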
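
On the search side, the comment block in the diff lists the field numbers of the stored record ($f5 and $f6 now carry the spotted and spotter DXCC countries, $f7 the origin). A small illustration of how search() rewrites a caller-supplied expression against that layout; the expression itself and the DXCC entity number 230 are arbitrary examples.

	# same substitution that search() applies before eval'ing the expression per record
	my $expr = '$f0 >= 14000 && $f0 <= 14350 && $f5 == 230';
	$expr =~ s/\$f(\d)/\$ref->[$1]/g;
	# $expr now reads '$ref->[0] >= 14000 && $ref->[0] <= 14350 && $ref->[5] == 230'
	# and is evaluated against each array ref ($ref) split out of the daily spot files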