package C4::Matcher;
-# Copyright (C) 2007 LibLime
+# Copyright (C) 2007 LibLime, 2012 C & P Bibliography Services
#
# This file is part of Koha.
#
-# Koha is free software; you can redistribute it and/or modify it under the
-# terms of the GNU General Public License as published by the Free Software
-# Foundation; either version 2 of the License, or (at your option) any later
-# version.
+# Koha is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
#
-# Koha is distributed in the hope that it will be useful, but WITHOUT ANY
-# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
-# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+# Koha is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
#
-# You should have received a copy of the GNU General Public License along
-# with Koha; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# You should have received a copy of the GNU General Public License
+# along with Koha; if not, see <http://www.gnu.org/licenses>.
-use strict;
-use warnings;
+use Modern::Perl;
-use C4::Context;
use MARC::Record;
-use C4::Search;
-use C4::Biblio;
-use vars qw($VERSION);
-
-BEGIN {
- # set the version for version checking
- $VERSION = 3.07.00.049;
-}
+use Koha::SearchEngine;
+use Koha::SearchEngine::Search;
+use Koha::Util::Normalize qw/legacy_default remove_spaces upper_case lower_case/;
=head1 NAME
$sth->execute($matcher_id); # relying on cascading deletes to clean up everything
}
+=head2 record_type

+ $matcher->record_type('biblio');
+ my $record_type = $matcher->record_type();
+
+Accessor method.
+
+=cut
+
+sub record_type {
+ my $self = shift;
+ # Combined getter/setter: with an argument, store it as the record
+ # type (the rest of this module branches on 'biblio' / 'authority');
+ # without one, return the current value.
+ @_ ? $self->{'record_type'} = shift : $self->{'record_type'};
+}
+
=head2 threshold
$matcher->threshold(1000);
);
}
-=head2 find_matches
+=head2 get_matches
my @matches = $matcher->get_matches($marc_record, $max_matches);
foreach $match (@matches) {
my %matches = ();
- foreach my $matchpoint (@{ $self->{'matchpoints'} }) {
- my @source_keys = _get_match_keys($source_record, $matchpoint);
+ my $QParser;
+ $QParser = C4::Context->queryparser if (C4::Context->preference('UseQueryParser'));
+ foreach my $matchpoint ( @{ $self->{'matchpoints'} } ) {
+ my @source_keys = _get_match_keys( $source_record, $matchpoint );
+
next if scalar(@source_keys) == 0;
+
+ # FIXME - because of a bug in QueryParser, an expression of the
+ # format 'isbn:"isbn1" || isbn:"isbn2" || isbn"isbn3"...'
+ # does not get parsed correctly, so we will not
+ # do AggressiveMatchOnISBN if UseQueryParser is on
+ @source_keys = C4::Koha::GetVariationsOfISBNs(@source_keys)
+ if ( $matchpoint->{index} =~ /^isbn$/i
+ && C4::Context->preference('AggressiveMatchOnISBN') )
+ && !C4::Context->preference('UseQueryParser');
+
+ @source_keys = C4::Koha::GetVariationsOfISSNs(@source_keys)
+ if ( $matchpoint->{index} =~ /^issn$/i
+ && C4::Context->preference('AggressiveMatchOnISSN') )
+ && !C4::Context->preference('UseQueryParser');
+
# build query
- my $query = join(" or ", map { "$matchpoint->{'index'}=$_" } @source_keys);
- # FIXME only searching biblio index at the moment
- my ($error, $searchresults, $total_hits) = SimpleSearch($query, 0, $max_matches);
+ my $query;
+ my $error;
+ my $searchresults;
+ my $total_hits;
+ if ( $self->{'record_type'} eq 'biblio' ) {
+
+ if ($QParser) {
+ $query = join( " || ",
+ map { "$matchpoint->{'index'}:$_" } @source_keys );
+ }
+ else {
+ my $phr = ( C4::Context->preference('AggressiveMatchOnISBN') || C4::Context->preference('AggressiveMatchOnISSN') ) ? ',phr' : q{};
+ $query = join( " or ",
+ map { "$matchpoint->{'index'}$phr=\"$_\"" } @source_keys );
+ #NOTE: double-quote the values so you don't get an "Embedded truncation not supported" error when a term has a ? in it.
+ }
+
+ my $searcher = Koha::SearchEngine::Search->new({index => $Koha::SearchEngine::BIBLIOS_INDEX});
+ ( $error, $searchresults, $total_hits ) =
+ $searcher->simple_search_compat( $query, 0, $max_matches );
+ }
+ elsif ( $self->{'record_type'} eq 'authority' ) {
+ my $authresults;
+ my @marclist;
+ my @and_or;
+ my @excluding = [];
+ my @operator;
+ my @value;
+ foreach my $key (@source_keys) {
+ push @marclist, $matchpoint->{'index'};
+ push @and_or, 'or';
+ push @operator, 'exact';
+ push @value, $key;
+ }
+ require C4::AuthoritiesMarc;
+ ( $authresults, $total_hits ) =
+ C4::AuthoritiesMarc::SearchAuthorities(
+ \@marclist, \@and_or, \@excluding, \@operator,
+ \@value, 0, 20, undef,
+ 'AuthidAsc', 1
+ );
+ foreach my $result (@$authresults) {
+ push @$searchresults, $result->{'authid'};
+ }
+ }
- if (defined $error ) {
+ if ( defined $error ) {
warn "search failed ($query) $error";
- } else {
- foreach my $matched (@{$searchresults}) {
+ }
+ else {
+ foreach my $matched ( @{$searchresults} ) {
$matches{$matched} += $matchpoint->{'score'};
}
}
# get rid of any that don't meet the required checks
%matches = map { _passes_required_checks($source_record, $_, $self->{'required_checks'}) ? ($_ => $matches{$_}) : () }
- keys %matches;
+ keys %matches unless ($self->{'record_type'} eq 'auth');
my @results = ();
- foreach my $marcblob (keys %matches) {
- my $target_record = MARC::Record->new_from_usmarc($marcblob);
- my $result = TransformMarcToKoha(C4::Context->dbh, $target_record, '');
- # FIXME - again, bibliospecific
- # also, can search engine be induced to give just the number in the first place?
- my $record_number = $result->{'biblionumber'};
- push @results, { 'record_id' => $record_number, 'score' => $matches{$marcblob} };
+ if ($self->{'record_type'} eq 'biblio') {
+ require C4::Biblio;
+ foreach my $marcblob (keys %matches) {
+ my $target_record = C4::Search::new_record_from_zebra('biblioserver',$marcblob);
+ my $record_number;
+ my $result = C4::Biblio::TransformMarcToKoha($target_record, '');
+ $record_number = $result->{'biblionumber'};
+ push @results, { 'record_id' => $record_number, 'score' => $matches{$marcblob} };
+ }
+ } elsif ($self->{'record_type'} eq 'authority') {
+ require C4::AuthoritiesMarc;
+ foreach my $authid (keys %matches) {
+ push @results, { 'record_id' => $authid, 'score' => $matches{$authid} };
+ }
}
@results = sort { $b->{'score'} cmp $a->{'score'} } @results;
if (scalar(@results) > $max_matches) {
$result->{'matcher_id'} = $self->{'id'};
$result->{'code'} = $self->{'code'};
$result->{'description'} = $self->{'description'};
+ $result->{'record_type'} = $self->{'record_type'};
$result->{'matchpoints'} = [];
foreach my $matchpoint (@{ $self->{'matchpoints'} }) {
}
sub _get_match_keys {
+
my $source_record = shift;
my $matchpoint = shift;
my $check_only_first_repeat = @_ ? shift : 0;
# If there are two 003s and two 001s, there will be two keys:
# first 003 + first 001
# second 003 + second 001
-
+
my @keys = ();
for (my $i = 0; $i <= $#{ $matchpoint->{'components'} }; $i++) {
my $component = $matchpoint->{'components'}->[$i];
$j++;
last FIELD if $j > 0 and $check_only_first_repeat;
last FIELD if $i > 0 and $j > $#keys;
- my $key = "";
- my $string;
- if ($field->is_control_field()) {
- $string=$field->data();
+
+ my $string;
+ if ( $field->is_control_field() ) {
+ $string = $field->data();
} else {
- foreach my $subfield ($field->subfields()) {
- if (exists $component->{'subfields'}->{$subfield->[0]}) {
- $string .= " " . $subfield->[1];
- }
- }
- }
+ $string = $field->as_string(
+ join('', keys %{ $component->{ subfields } }), ' ' # ' ' as separator
+ );
+ }
+
if ($component->{'length'}>0) {
- $string= substr($string, $component->{'offset'}, $component->{'length'});
- # FIXME normalize, substr
+ $string= substr($string, $component->{'offset'}, $component->{'length'});
} elsif ($component->{'offset'}) {
- $string= substr($string, $component->{'offset'});
+ $string= substr($string, $component->{'offset'});
}
- $key = _normalize($string);
+
+ my $norms = $component->{'norms'};
+ my $key = $string;
+
+ foreach my $norm ( @{ $norms } ) {
+ if ( grep { $norm eq $_ } valid_normalization_routines() ) {
+ if ( $norm eq 'remove_spaces' ) {
+ $key = remove_spaces($key);
+ }
+ elsif ( $norm eq 'upper_case' ) {
+ $key = upper_case($key);
+ }
+ elsif ( $norm eq 'lower_case' ) {
+ $key = lower_case($key);
+ }
+ elsif ( $norm eq 'legacy_default' ) {
+ $key = legacy_default($key);
+ }
+ } else {
+ warn "Invalid normalization routine required ($norm)"
+ unless $norm eq 'none';
+ }
+ }
+
if ($i == 0) {
push @keys, $key if $key;
} else {
return $component;
}
-# FIXME - default normalizer
-sub _normalize {
- my $value = uc shift;
- $value =~ s/[.;:,\]\[\)\(\/'"]//g;
- $value =~ s/^\s+//;
- #$value =~ s/^\s+$//;
- $value =~ s/\s+$//;
- $value =~ s/\s+/ /g;
- #$value =~ s/[.;,\]\[\)\(\/"']//g;
- return $value;
+sub valid_normalization_routines {
+
+ # Names of the Koha::Util::Normalize routines a matchpoint component
+ # may request in its 'norms' list; any other name (except 'none') is
+ # warned about by _get_match_keys.
+ return (
+ 'remove_spaces',
+ 'upper_case',
+ 'lower_case',
+ 'legacy_default'
+ );
}
1;