#!/usr/bin/env perl
use strict;
use warnings;
use 5.010;
use File::Slurp qw(read_file write_file);
use JSON;
use Text::CSV;
# Read stations.csv (UTF-8) and build @stations from its data rows.
# Each valid row yields a hashref: { name, ds100, uic (numeric),
# latlong (undef or [lat, lon]) }. Invalid rows are reported and skipped.
my @csv_lines = read_file( 'stations.csv', { binmode => ':utf8' } );
my @stations;
my $csv = Text::CSV->new;

# skip header
shift @csv_lines;

for my $line (@csv_lines) {

    # Strip the trailing newline so skip diagnostics below print cleanly
    # on one line.
    chomp $line;

    if ( $csv->parse($line) ) {
        my ( $name, $ds100, $uic, $lat, $lon ) = $csv->fields;
        if ( not $name ) {
            say "Station name is mandatory -- skipping this line: $line";
            next;
        }
        if ( not $ds100 ) {
            say "DS100 is mandatory at the moment -- skipping this line: $line";
            next;
        }
        if ( not $uic or $uic !~ m{ ^ \d+ $ }x ) {
            say
              "UIC is mandatory and must be numeric -- skipping this line: $line";
            next;
        }
        my $station = {
            name    => $name,
            ds100   => $ds100,
            uic     => 0 + $uic,
            latlong => undef
        };

        # Coordinates are optional; store them numerically when both are
        # present.
        if ( $lat and $lon ) {
            $station->{latlong} = [ 0 + $lat, 0 + $lon ];
        }
        push( @stations, $station );
    }
    else {
        # Previously a malformed row was dropped without any diagnostic,
        # unlike every other validation failure above.
        say "Cannot parse CSV -- skipping this line: $line";
    }
}
# Sort the station list by name for stable, human-friendly JSON output.
@stations = sort { $a->{name} cmp $b->{name} } @stations;

my $have_duplicates = 0;

# Adjacent-pair duplicate detection only works on a list sorted by the key
# being checked. @stations is sorted by name, so @ds100 and @uic_ids must
# be re-sorted on their own keys (numerically for UIC) -- otherwise
# duplicates that are not adjacent under the name ordering go undetected.
my @names   = map { $_->{name} } @stations;
my @ds100   = sort map { $_->{ds100} } @stations;
my @uic_ids = sort { $a <=> $b } map { $_->{uic} } @stations;

for my $i ( 1 .. $#names ) {
    if ( $names[ $i - 1 ] eq $names[$i] ) {
        say "Duplicate station name: $names[$i]";
        $have_duplicates = 1;
    }
}
for my $i ( 1 .. $#ds100 ) {
    if ( $ds100[ $i - 1 ] eq $ds100[$i] ) {
        say "Duplicate DS100 code: $ds100[$i]";
        $have_duplicates = 1;
    }
}
for my $i ( 1 .. $#uic_ids ) {
    if ( $uic_ids[ $i - 1 ] == $uic_ids[$i] ) {
        say "Duplicate UIC ID: $uic_ids[$i]";
        $have_duplicates = 1;
    }
}
if ($have_duplicates) {
    say "Data has NOT been converted to stations.json";
    say "Please remove duplicates and run $0 again";

    # Without this exit the script fell through and wrote stations.json
    # anyway, contradicting the message above.
    exit 1;
}
# Serialize the validated station list as pretty-printed, canonically
# key-ordered, UTF-8-encoded JSON and write it out.
my $encoder = JSON->new->utf8->canonical->pretty;
write_file( 'stations.json', $encoder->encode( \@stations ) );