# Dependencies: Text::CSV for parsing and String::Similarity for fuzzy
# id matching; Data::Dump is used for debug output throughout.
7 # apt install libtext-csv-perl libstring-similarity-perl
9 use Data::Dump qw(dump);
10 use String::Similarity;
# Verbose diagnostics are enabled via the DEBUG environment variable.
13 my $debug = $ENV{DEBUG};
# The four input CSV files to be merged (also iterated as 1..4 below).
15 my @files = qw( 1.csv 2.csv 3.csv 4.csv );
# Fragment of a source-overlap check (the enclosing "sub" header is on a
# line not visible in this extract): count how many times each source
# file number appears across the ids in @for; any count > 1 means two of
# the ids carry a row from the same source file, i.e. merging them would
# collide.
23 $use->{$_}++ foreach ( map { @{ $stat->{_}->{$_} } } @for );
24 my $duplicate = grep { $use->{$_} > 1 } keys %$use;
25 print "XXX use @for ",dump($use),$/ if $debug && $duplicate;
# candidates( $num, $key_id [, $limit_sim ] ) -- interior of the
# "candidates" sub (its header line is not visible in this extract).
# Collects every key stored under $keys->{$key_id} whose similarity to
# $num exceeds $limit_sim, then orders them best-first.
30 my ( $num, $key_id, $limit_sim ) = @_;
# Threshold falls back to the LIMIT environment variable when the third
# argument is not supplied.
31 $limit_sim //= $ENV{LIMIT};
35 foreach my $key ( sort keys %{ $keys->{ $key_id } } ) {
36 next if $key eq $num; # XXX don't return key from input
# Third argument to String::Similarity's similarity() is a minimum
# score that lets it bail out early on hopeless pairs.
37 my $s = similarity $num, $key, $limit_sim;
38 #warn "# $num $key $s\n";
39 if ($s > $limit_sim ) {
40 if ( exists $stat->{_}->{"$key-$key_id"} ) {
# Reject a candidate when merging it with $num would combine two rows
# that came from the same source file (any per-file count > 1 below).
42 $use->{$_}++ foreach (
43 @{ $stat->{_}->{"$num-$key_id"} },
44 @{ $stat->{_}->{"$key-$key_id"} },
46 #print "XXX use $num $key ",dump($use);
47 my $duplicate = grep { $use->{$_} > 1 } keys %$use;
49 print "XXX $limit_sim suggest duplicate $num $key SKIP duplicate ",dump($use), $/ if $debug;
50 $stat->{suggest}->{duplicate}++;
52 push @candidates, { key => $key, s => $s };
55 print "XXX $limit_sim candidates $key missing\n" if $debug;
# The block below (original lines 60-65) is commentary plus sample
# program output, presumably inside a POD/comment section whose
# delimiters are on lines not visible in this extract.
60 for limit 0.6 to work, we need to prefer longer results over shorter ones:
61 MERGE A0246A 3078-8326 (
62 { key => "A4065A", s => 0.666666666666667 },
63 { key => "ANDREJA0246A", s => 0.666666666666667 },
65 .++ A0246A-3078-8326 1 .++ A0246A-3078-8326 2 result val=[3, 1, 2] result_elements=3
# Sort candidates by similarity score descending, ties broken by longer
# key first -- the "prefer longer results" rule described above.
68 if ( $#candidates > 0 ) {
69 #print "XXX candidates before = ",dump( \@candidates ),$/;
71 $b->{s} <=> $a->{s} # hi -> low
72 or length($b->{key}) <=> length($a->{key})
74 #print "XXX candidates after = ",dump( \@candidates ),$/;
# Optional Storable cache of %$keys from a previous run.
79 my $keys_file = 'keys.storable';
80 if ( -e $keys_file ) {
# NOTE(review): retrieve() is commented out, so nothing is actually
# loaded here even though the LOAD message still prints (and prints
# zero keys) -- confirm whether the cache load should be re-enabled.
81 #$keys = retrieve($keys_file) or die "$keys_file: $!";
82 print "LOAD $keys_file", scalar keys %$keys, "\n";
# Pass 1: ingest the four CSV files into %$data, keyed by the composite
# key_id ("<col1>-<col2>") and the cleaned, upper-cased id (col 0),
# while collecting per-file statistics in %$stat.
89 foreach my $val ( 1 .. 4 ) {
90 my $file = "$val.csv";
# Rows that collide on (key_id, num) within one file get appended to a
# per-file duplicate report.  NOTE(review): this open is unchecked.
93 open(my $duplicate_fh, '>', "duplicate-$val.csv");
95 my $csv = Text::CSV->new ({ binary => 1, auto_diag => 1 });
96 open my $fh, "<:encoding(utf8)", $file or die "$file: $!";
97 while (my $row = $csv->getline ($fh)) {
98 $stat->{file}->{$file}->{lines}++;
99 $stat->{file}->{$file}->{columns}->{ $#$row }++;
# The first row of each file is kept as that file's header.
101 if ( ! exists $data_headers[$val] ) {
102 $data_headers[$val] = $row;
# Column 0 is the record id; columns 1 and 2 are the two key parts.
106 my $c_id = $row->[0];
# Scrub non-word characters from the id and non-digit characters from
# the key columns, counting each corrupted original value and printing
# a c0/c1/c2 marker.  NOTE(review): $c_s and $c_r are used here but
# assigned on lines not visible in this extract -- presumably copies of
# $row->[1] / $row->[2] taken before the substitution.
109 $row->[0] =~ s/[^\w\d]+//ig && $stat->{file}->{$file}->{corrupt_id}->{$c_id}++ && print 'c0';
110 $row->[1] =~ s/\D+//g && $stat->{file}->{$file}->{corrupt_s}->{$c_s}++ && print 'c1';
111 $row->[2] =~ s/\D+//g && $stat->{file}->{$file}->{corrupt_r}->{$c_r}++ && print 'c2';
# Ids shorter than 3 characters are too ambiguous to match -- skip.
# NOTE(review): $row->[ 0 .. 5 ] evaluates the range in scalar context
# (a single element), not a slice; @{$row}[0..5] was probably intended.
118 my $num = uc $row->[0];
119 if ( length $num < 3 ) {
120 print "IGNORE $val ",dump($row->[ 0 .. 5 ]),$/;
121 $stat->{ignore}->{$val}++;
# Composite key; the full "$num-$key_id" id used below as $id is
# assembled on a line not visible in this extract.
125 my $key_id = $row->[1] . '-' . $row->[2];
127 $stat->{A_key_id}->{$key_id}->{$val}++;
129 $keys->{ $key_id }->{ $num }++;
131 $stat->{exists}->{$val}++ if exists $stat->{_}->{ $id };
# Track which source files (1..4) contributed each id.
133 push @{ $stat->{_}->{ $id } }, $val;
# Same (key_id, num) seen twice in one file: report both rows plus a
# column-by-column diff.
135 if ( exists $data->{$key_id}->{$num}->{$val} ) {
136 $stat->{file}->{$file}->{duplicate_keyid_num}->{$val}++;
137 print "DUPLICATE $file $key_id $num $val\n";
139 my $old = $data->{$key_id}->{$num}->{$val};
140 print $duplicate_fh join(',', $file, @$old), "\n";
141 print $duplicate_fh join(',', $file, @$row), "\n";
142 print $duplicate_fh "\n";
145 foreach ( 0 .. $#$row ) {
146 if ( $old->[$_] ne $row->[$_] ) {
147 $diff->[$_] = [ $old->[$_], $row->[$_] ];
150 print "diff = ",dump($diff) if $diff;
151 #print "old=", dump( $data->{$key_id}->{$num}->{$val} ), $/;
152 #print "new=", dump( $row ), $/;
# Store the row, normalizing purely-zero fractions (e.g. "5.000000" ->
# "5"); later duplicates overwrite earlier ones.
155 # remove .000000 from values
156 $data->{$key_id}->{$num}->{$val} = [ map { s/^(\d+)\.0+$/$1/g; $_; } @$row ];
# Pass 2: for each similarity limit, merge near-duplicate ids that share
# a key_id, then recompute the per-id distribution statistics.
166 # 0.9 - 0.7 -- 0.6 is too lax
167 foreach my $limit ( 0.7 ) { #, 0.6 ) {
168 warn "XXX limit $limit\n";
170 print "# total = ",scalar keys %{ $stat->{_} }, $/;
171 foreach my $id ( sort keys %{ $stat->{_} } ) {
173 my $v = $stat->{_}->{$id};
174 if ( defined $v && ref $v eq 'ARRAY' ) {
# "A" statistics: pre-merge counts of ids per number of (unique)
# contributing source files.
181 $u->{$_}++ foreach @val;
182 my @u_v = sort keys %$u;
184 $stat->{A_count}->{ scalar @val }++;
185 $stat->{A_count_total}++;
187 #$stat->{A_count_val_dup}->{ join(' ', @val) }++; # with duplicates
189 $stat->{A_count_val}->{ join(' ', @u_v ) }++; # without duplicates
# Only ids present in fewer than 4 files can absorb merge candidates.
192 if ( $#u_v < 3 ) { # single, double
193 my ( $num, $key_id ) = split(/-/,$id,2);
194 my @candidates = candidates $num => $key_id, $limit; #, 0.7; # XXX 0.9 too high, 0.8 better, 0.7 too lax
196 print "MERGE ",scalar @candidates, " $limit $num $key_id ", dump( @candidates ), ' val=', dump( \@val ), $/;
197 my @keys = map { $_->{key} } @candidates;
# The current id survives the merge; candidates are folded into it.
198 my $m_id = $id; # "$keys[0]-$key_id";
199 foreach my $i ( 0 .. $#keys ) {
200 my $id = "$keys[$i]-$key_id";
201 if ( ! exists $stat->{_}->{$id} ) {
202 print "ERROR: $num $key_id can't find $i $id";
205 # XXX I298O-4743-7996
# Skip a candidate whose merge would put two rows from the same source
# file under one id (the duplicate() sub fragment near the top).
206 if ( duplicate( $m_id => $id ) ) {
207 print "XXX duplicate2 $m_id $id\n";
208 $stat->{duplicate2}++;
# Move the candidate's source-file list and its row data under $m_id.
213 my $o = delete $stat->{_}->{$id};
214 die "FATAL: can't find $id" if ! $o;
216 my ( $id_s, $s, $r ) = split('-', $id);
219 foreach my $val ( @$o ) {
221 push @{ $stat->{_}->{ $m_id } }, $val;
222 print "++ $m_id $val ";
223 $stat->{merge_val}->{$val}++;
# Record the id rewrite per source file so it can be replayed later
# (persisted as merge.storable).  NOTE(review): $key_s is used here but
# assigned on a line not visible in this extract -- presumably the
# "$s-$r" key part of $id.
226 die "ERROR merge: $val $id $m_id exists",dump( $merge_ids->{$val}->{$key_s}->{$id_s} ) if exists $merge_ids->{$val}->{$key_s}->{$id_s};
227 my $m_id_s = (split('-',$m_id,3))[0];
228 $merge_ids->{$val}->{$key_s}->{$id_s} = $m_id_s;
230 my $o_row = delete $data->{$key_s}->{$id_s}->{$val};
231 die "FATAL: $id | $m_id | data $key_s $id_s $val" unless $o_row;
232 $data->{$key_s}->{$m_id_s}->{$val} = $o_row;
# After moving every per-file row, the old id's slot must be empty.
235 my @not_empty = sort keys %{ $data->{$key_s}->{$id_s} };
236 die "FATAL: $id_s not empty" if @not_empty;
237 delete $data->{$key_s}->{$id_s}; # FIXME check before cleanup
239 print "result val=",dump( $stat->{_}->{ $m_id } ), " result_elements=", scalar @{ $stat->{_}->{ $m_id } }, $/;
# "B<limit>" statistics: post-merge distribution, same shape as "A".
247 print "# total after merge $limit = ",scalar keys %{ $stat->{_} }, $/;
249 foreach my $id ( sort keys %{ $stat->{_} } ) {
250 my @val; # = @{ $stat->{_}->{$id} };
251 my $v = $stat->{_}->{$id};
252 if ( defined $v && ref $v eq 'ARRAY' ) {
258 $stat->{"B${limit}_count"}->{ scalar @val }++;
259 $stat->{"B${limit}_count_total"}++;
261 #$stat->{"B${limit}_count_val_dup"}->{ join(' ', @val) }++; # with duplicates
264 $u->{$_}++ foreach @val;
265 my @u_v = sort keys %$u;
266 $stat->{"B${limit}_count_val"}->{ join(' ', @u_v ) }++; # without duplicates
# Final outputs: dump the collected statistics, persist %$keys and
# %$merge_ids with Storable, and write the merged wide CSV (the four
# files' columns side by side, suffixed _1.._4).
272 print "# stat = ",dump( $stat );
273 #print "# keys = ",dump( $keys );
275 store $keys, $keys_file;
277 my $merge_file = 'merge.storable';
278 store $merge_ids, $merge_file;
279 #print "XXX merge_ids = ", dump($merge_ids);
281 my $out_file = 'merged.csv';
282 print "out_file $out_file";
# NOTE(review): this open is unchecked, and plain join/print performs
# no CSV quoting -- cells containing commas would corrupt the output.
283 open(my $out_fh, '>', $out_file);
# Header row: each file's header columns suffixed with its file number.
284 foreach my $val ( 1 .. 4 ) {
285 print $out_fh join(',', map { $_ . '_' . $val } @{ $data_headers[$val] });
286 print $out_fh ',' if $val < 4;
# One output row per (key, id); a file with no row for that id
# contributes empty cells.  NOTE(review): ',' x $#{...} emits one
# separator fewer than the column count, so the FIXME about +1 looks
# justified -- verify against the header width.
290 foreach my $key ( sort keys %$data ) {
292 foreach my $id ( sort keys %{ $data->{$key} } ) {
293 #print $out_fh "## $id ## ";
294 foreach my $val ( 1 .. 4 ) {
295 if ( my $id_data = $data->{$key}->{$id}->{$val} ) {
296 print $out_fh join(',', @$id_data);
298 print $out_fh ( ',' x $#{ $data_headers[$val] } ) ; # FIXME +1?
300 print $out_fh ',' if $val < 4;
306 print "\n", -s $out_file, " bytes created\n";