7 # apt install libtext-csv-perl libstring-similarity-perl
9 use Data::Dump qw(dump);
10 use String::Similarity;
# NOTE(review): this view of the file is elided (gaps in original line numbers).
# 'use strict; use warnings;', Text::CSV and Storable imports are presumably on
# the missing lines — confirm against the full file.
# Debug output throughout the script is gated on the DEBUG environment variable.
13 my $debug = $ENV{DEBUG};
# Input CSV files to merge; the main loop below re-derives these as "$val.csv"
# for $val in 1..4, so this list and that loop must stay in sync.
15 my @files = qw( 1.csv 2.csv 3.csv 4.csv );
# Fragment of a collision check (the enclosing 'sub' line is elided — presumably
# sub duplicate, called from the merge pass below; TODO confirm): tally how many
# times each source-file value appears across the $stat->{_} entries for the ids
# in @for (@for's declaration is also on an elided line).
23 $use->{$_}++ foreach ( map { @{ $stat->{_}->{$_} } } @for );
# True when some file value occurs more than once, i.e. both ids already have a
# row from the same source file and merging them would collide.
24 my $duplicate = grep { $use->{$_} > 1 } keys %$use;
25 print "XXX use @for ",dump($use),$/ if $debug && $duplicate;
# Body fragment of (presumably) sub candidates: given a row id $num, its
# composite $key_id and a similarity threshold, collect the other keys stored
# under the same $key_id whose String::Similarity score exceeds the threshold.
# NOTE(review): the enclosing 'sub' line, @candidates declaration and several
# closing braces are on elided lines — confirm against the full file.
30 my ( $num, $key_id, $limit_sim ) = @_;
# Fall back to the LIMIT environment variable when no threshold was passed.
31 $limit_sim //= $ENV{LIMIT};
# Sorted iteration keeps the output order deterministic between runs.
35 foreach my $key ( sort keys %{ $keys->{ $key_id } } ) {
36 next if $key eq $num; # XXX don't return key from input
# Third argument to similarity() is a cutoff: the computation may stop early
# once the score can no longer reach $limit_sim (see String::Similarity docs).
37 my $s = similarity $num, $key, $limit_sim;
38 #warn "# $num $key $s\n";
39 if ($s > $limit_sim ) {
40 if ( exists $stat->{_}->{"$key-$key_id"} ) {
# Tally the source-file values of both entries; a value seen twice means
# the same input file contributed to both ids, so merging would duplicate.
42 $use->{$_}++ foreach (
43 @{ $stat->{_}->{"$num-$key_id"} },
44 @{ $stat->{_}->{"$key-$key_id"} },
46 #print "XXX use $num $key ",dump($use);
47 my $duplicate = grep { $use->{$_} > 1 } keys %$use;
49 print "XXX $limit_sim suggest duplicate $num $key SKIP duplicate ",dump($use), $/ if $debug;
50 $stat->{suggest}->{duplicate}++;
# Non-colliding similar key: keep it together with its similarity score.
52 push @candidates, { key => $key, s => $s };
55 print "XXX $limit_sim candidates $key missing\n" if $debug;
60 for limit 0.6 to work, we need to prefer longer results over shorter ones:
61 MERGE A0246A 3078-8326 (
62 { key => "A4065A", s => 0.666666666666667 },
63 { key => "ANDREJA0246A", s => 0.666666666666667 },
65 .++ A0246A-3078-8326 1 .++ A0246A-3078-8326 2 result val=[3, 1, 2] result_elements=3
# With more than one candidate, order them best-first: highest similarity,
# then longer key wins on ties (the tie-break that makes limit 0.6 usable,
# per the worked example above).
68 if ( $#candidates > 0 ) {
69 #print "XXX candidates before = ",dump( \@candidates ),$/;
71 $b->{s} <=> $a->{s} # hi -> low
72 or length($b->{key}) <=> length($a->{key})
74 #print "XXX candidates after = ",dump( \@candidates ),$/;
# --- Optional Storable cache of %$keys from a previous run ---
79 my $keys_file = 'keys.storable';
80 if ( -e $keys_file ) {
# NOTE(review): retrieve() is commented out, so the LOAD message below always
# reports 0 keys even when the cache file exists — re-enable the retrieve or
# drop this branch; confirm which was intended.
81 #$keys = retrieve($keys_file) or die "$keys_file: $!";
82 print "LOAD $keys_file", scalar keys %$keys, "\n";
# --- Ingest: read 1.csv .. 4.csv, scrub key columns, build %$data and %$stat.
# Rows that share (key_id, num) within one file are logged to duplicate.csv. ---
88 open(my $duplicate_fh, '>', 'duplicate.csv');
90 foreach my $val ( 1 .. 4 ) {
91 my $file = "$val.csv";
94 my $csv = Text::CSV->new ({ binary => 1, auto_diag => 1 });
95 open my $fh, "<:encoding(utf8)", $file or die "$file: $!";
96 while (my $row = $csv->getline ($fh)) {
97 $stat->{file}->{$file}->{lines}++;
98 $stat->{file}->{$file}->{columns}->{ $#$row }++;
# First row of each file is its header; keep it for the merged output later.
100 if ( ! exists $data_headers[$val] ) {
101 $data_headers[$val] = $row;
# Scrub the three key columns in place; each counter/print fires only when the
# substitution actually changed something (the && chain short-circuits).
# NOTE(review): $c_s / $c_r are presumably pre-scrub copies of columns 1 and 2
# captured on elided lines, analogous to $c_id — confirm.
105 my $c_id = $row->[0];
108 $row->[0] =~ s/[^\w\d]+//ig && $stat->{file}->{$file}->{corrupt_id}->{$c_id}++ && print 'c0';
109 $row->[1] =~ s/\D+//g && $stat->{file}->{$file}->{corrupt_s}->{$c_s}++ && print 'c1';
110 $row->[2] =~ s/\D+//g && $stat->{file}->{$file}->{corrupt_r}->{$c_r}++ && print 'c2';
117 my $num = uc $row->[0];
# Ids shorter than 3 characters are too ambiguous to fuzzy-match — skip them.
# NOTE(review): $row->[ 0 .. 5 ] evaluates the range in scalar context (a
# single subscript), not a slice; a slice would be @{$row}[0..5] — likely bug.
118 if ( length $num < 3 ) {
119 print "IGNORE $val ",dump($row->[ 0 .. 5 ]),$/;
120 $stat->{ignore}->{$val}++;
# Composite key from the two numeric columns; rows are then grouped as
# $data->{$key_id}->{$num}->{$val} (file number as the innermost key).
124 my $key_id = $row->[1] . '-' . $row->[2];
126 $stat->{A_key_id}->{$key_id}->{$val}++;
128 $keys->{ $key_id }->{ $num }++;
130 $stat->{exists}->{$val}++ if exists $stat->{_}->{ $id };
# Track which source files contributed this id ($id built on an elided line,
# presumably "$num-$key_id" to match the lookups in candidates()).
132 push @{ $stat->{_}->{ $id } }, $val;
# Same (key_id, num) seen twice in one file: record both rows in duplicate.csv,
# print a field-by-field diff, and let the later row win (assignment below).
134 if ( exists $data->{$key_id}->{$num}->{$val} ) {
135 $stat->{file}->{$file}->{duplicate_keyid_num}->{$val}++;
136 print "DUPLICATE $key_id $num $val old elements=", $#{ $data->{$key_id}->{$num}->{$val} }, "new ", $#$row, $/;
138 my $old = $data->{$key_id}->{$num}->{$val};
139 print $duplicate_fh join(',', $file, @$old), "\n";
140 print $duplicate_fh join(',', $file, @$row), "\n";
141 foreach ( 0 .. $#$row ) {
142 if ( $old->[$_] ne $row->[$_] ) {
143 $diff->[$_] = [ $old->[$_], $row->[$_] ];
146 print "diff = ",dump($diff) if $diff;
147 #print "old=", dump( $data->{$key_id}->{$num}->{$val} ), $/;
148 #print "new=", dump( $row ), $/;
150 $data->{$key_id}->{$num}->{$val} = $row;
# --- Merge pass: for ids missing from at least one file, absorb rows filed
# under similar ids (per candidates()) so they all land on one merged id. ---
160 # 0.9 - 0.7 -- 0.6 is too lax
161 foreach my $limit ( 0.7 ) { #, 0.6 ) {
162 warn "XXX limit $limit\n";
164 print "# total = ",scalar keys %{ $stat->{_} }, $/;
165 foreach my $id ( sort keys %{ $stat->{_} } ) {
167 my $v = $stat->{_}->{$id};
168 if ( defined $v && ref $v eq 'ARRAY' ) {
# Unique source-file values for this id. NOTE(review): @val is presumably
# copied from @$v on an elided line, as the commented hint at orig 244
# suggests for the second pass — confirm.
175 $u->{$_}++ foreach @val;
176 my @u_v = sort keys %$u;
# Pre-merge distributions: how many files contribute to each id, and which.
178 $stat->{A_count}->{ scalar @val }++;
179 $stat->{A_count_total}++;
181 #$stat->{A_count_val_dup}->{ join(' ', @val) }++; # with duplicates
183 $stat->{A_count_val}->{ join(' ', @u_v ) }++; # without duplicates
# Only ids seen in fewer than 4 distinct files are merge candidates.
186 if ( $#u_v < 3 ) { # single, double
# Id format is "<num>-<key_id>"; limit 2 keeps key_id's own dash intact.
187 my ( $num, $key_id ) = split(/-/,$id,2);
188 my @candidates = candidates $num => $key_id, $limit; #, 0.7; # XXX 0.9 too high, 0.8 better, 0.7 too lax
190 print "MERGE ",scalar @candidates, " $limit $num $key_id ", dump( @candidates ), ' val=', dump( \@val ), $/;
191 my @keys = map { $_->{key} } @candidates;
# Merge target: keep the current id and absorb the candidate ids into it.
192 my $m_id = $id; # "$keys[0]-$key_id";
193 foreach my $i ( 0 .. $#keys ) {
194 my $id = "$keys[$i]-$key_id";
195 if ( ! exists $stat->{_}->{$id} ) {
196 print "ERROR: $num $key_id can't find $i $id";
199 # XXX I298O-4743-7996
# Skip candidates whose rows come from the same source files (collision).
200 if ( duplicate( $m_id => $id ) ) {
201 print "XXX duplicate2 $m_id $id\n";
202 $stat->{duplicate2}++;
# Detach the candidate's file list so its entries can move to the target.
207 my $o = delete $stat->{_}->{$id};
208 die "FATAL: can't find $id" if ! $o;
# $id is "<num>-<s>-<r>". NOTE(review): this split has no limit, unlike the
# split(/-/,$id,2) above — it breaks if $num itself contains a dash, and
# $key_s used below is presumably assembled from $s/$r on an elided line;
# confirm both.
210 my ( $id_s, $s, $r ) = split('-', $id);
213 foreach my $val ( @$o ) {
# Re-home each source file's contribution under the merged id.
215 push @{ $stat->{_}->{ $m_id } }, $val;
216 print "++ $m_id $val ";
217 $stat->{merge_val}->{$val}++;
# Record the rename for merge.storable; refuse to overwrite a prior mapping.
220 die "ERROR merge: $val $id $m_id exists",dump( $merge_ids->{$val}->{$key_s}->{$id_s} ) if exists $merge_ids->{$val}->{$key_s}->{$id_s};
221 my $m_id_s = (split('-',$m_id,3))[0];
222 $merge_ids->{$val}->{$key_s}->{$id_s} = $m_id_s;
# Move the actual CSV row from the old id to the merged id.
224 my $o_row = delete $data->{$key_s}->{$id_s}->{$val};
225 die "FATAL: $id | $m_id | data $key_s $id_s $val" unless $o_row;
226 $data->{$key_s}->{$m_id_s}->{$val} = $o_row;
# After moving every row the old id must be empty; then drop its hash slot.
229 my @not_empty = sort keys %{ $data->{$key_s}->{$id_s} };
230 die "FATAL: $id_s not empty" if @not_empty;
231 delete $data->{$key_s}->{$id_s}; # FIXME check before cleanup
233 print "result val=",dump( $stat->{_}->{ $m_id } ), " result_elements=", scalar @{ $stat->{_}->{ $m_id } }, $/;
# --- Second pass: recompute the same distributions after merging, stored under
# limit-specific "B<limit>_*" keys so runs with different limits can be compared. ---
241 print "# total after merge $limit = ",scalar keys %{ $stat->{_} }, $/;
243 foreach my $id ( sort keys %{ $stat->{_} } ) {
244 my @val; # = @{ $stat->{_}->{$id} };
245 my $v = $stat->{_}->{$id};
246 if ( defined $v && ref $v eq 'ARRAY' ) {
# How many files contribute to each id after merging.
252 $stat->{"B${limit}_count"}->{ scalar @val }++;
253 $stat->{"B${limit}_count_total"}++;
255 #$stat->{"B${limit}_count_val_dup"}->{ join(' ', @val) }++; # with duplicates
# Which distinct files contribute, duplicates collapsed via %$u.
258 $u->{$_}++ foreach @val;
259 my @u_v = sort keys %$u;
260 $stat->{"B${limit}_count_val"}->{ join(' ', @u_v ) }++; # without duplicates
# Final statistics dump for the whole run.
266 print "# stat = ",dump( $stat );
267 #print "# keys = ",dump( $keys );
# --- Persist caches (Storable) and write the merged CSV. ---
269 store $keys, $keys_file;
271 my $merge_file = 'merge.storable';
272 store $merge_ids, $merge_file;
273 #print "XXX merge_ids = ", dump($merge_ids);
275 my $out_file = 'merged.csv';
276 print "out_file $out_file";
277 open(my $out_fh, '>', $out_file);
# Header row: each file's headers suffixed with its file number, comma-joined
# across all four files. NOTE(review): no trailing newline is visible here —
# presumably printed on an elided line after the loop; confirm.
278 foreach my $val ( 1 .. 4 ) {
279 print $out_fh join(',', map { $_ . '_' . $val } @{ $data_headers[$val] });
280 print $out_fh ',' if $val < 4;
# One output row per (key, id): the four files' columns side by side; a file
# with no row for this id contributes empty fields of the same width.
284 foreach my $key ( sort keys %$data ) {
286 foreach my $id ( sort keys %{ $data->{$key} } ) {
287 #print $out_fh "## $id ## ";
288 foreach my $val ( 1 .. 4 ) {
289 if ( my $id_data = $data->{$key}->{$id}->{$val} ) {
290 print $out_fh join(',', @$id_data);
# N-1 commas yield N empty fields, matching the width of the join() branch
# above for an N-column header, so the FIXME's +1 looks unnecessary — verify
# against a file whose row width differs from its header width.
292 print $out_fh ( ',' x $#{ $data_headers[$val] } ) ; # FIXME +1?
294 print $out_fh ',' if $val < 4;
300 print "\n", -s $out_file, " bytes created\n";