Diffstat (limited to 'lib/Travelynx/Command')
-rw-r--r--  lib/Travelynx/Command/account.pm         |  119
-rw-r--r--  lib/Travelynx/Command/database.pm        | 2679
-rw-r--r--  lib/Travelynx/Command/dumpconfig.pm      |    4
-rw-r--r--  lib/Travelynx/Command/dumpstops.pm       |   52
-rw-r--r--  lib/Travelynx/Command/influxdb.pm        |  204
-rw-r--r--  lib/Travelynx/Command/integritycheck.pm  |  173
-rw-r--r--  lib/Travelynx/Command/maintenance.pm     |   89
-rw-r--r--  lib/Travelynx/Command/munin.pm           |   46
-rw-r--r--  lib/Travelynx/Command/traewelling.pm     |  239
-rw-r--r--  lib/Travelynx/Command/work.pm            |  829
-rw-r--r--  lib/Travelynx/Command/worker.pm          |   27
11 files changed, 4286 insertions, 175 deletions
diff --git a/lib/Travelynx/Command/account.pm b/lib/Travelynx/Command/account.pm
new file mode 100644
index 0000000..1d17400
--- /dev/null
+++ b/lib/Travelynx/Command/account.pm
@@ -0,0 +1,119 @@
+package Travelynx::Command::account;
+
+# Copyright (C) 2021 Birte Kristina Friesel
+#
+# SPDX-License-Identifier: AGPL-3.0-or-later
+use Mojo::Base 'Mojolicious::Command';
+use UUID::Tiny qw(:std);
+
+has description => 'Add or remove user accounts';
+
+has usage => sub { shift->extract_usage };
+
+sub add_user {
+ my ( $self, $name, $email ) = @_;
+
+ my $db = $self->app->pg->db;
+
+ if ( my $error = $self->app->users->is_name_invalid( name => $name ) ) {
+ say "Cannot add account '$name': $error";
+ die;
+ }
+
+ my $token = "tmp";
+ my $password = substr( create_uuid_as_string(UUID_V4), 0, 18 );
+
+ my $tx = $db->begin;
+ my $user_id = $self->app->users->add(
+ db => $db,
+ name => $name,
+ email => $email,
+ token => $token,
+ password => $password,
+ );
+ my $success = $self->app->users->verify_registration_token(
+ db => $db,
+ uid => $user_id,
+ token => $token,
+ in_transaction => 1,
+ );
+
+ if ($success) {
+ $tx->commit;
+ say "Added user $name ($email) with UID $user_id";
+ say "Temporary password for login: $password";
+ }
+}
+
+sub delete_user {
+ my ( $self, $uid ) = @_;
+
+ my $user_data = $self->app->users->get( uid => $uid );
+
+ if ( not $user_data ) {
+ say "UID $uid does not exist.";
+ return;
+ }
+
+ $self->app->users->flag_deletion( uid => $uid );
+
+ say "User $user_data->{name} (UID $uid) has been flagged for deletion.";
+}
+
+sub really_delete_user {
+ my ( $self, $uid, $name ) = @_;
+
+ my $user_data = $self->app->users->get( uid => $uid );
+
+ if ( $user_data->{name} ne $name ) {
+ say
+ "User name $name does not match UID $uid. Account deletion aborted.";
+ return;
+ }
+
+ my $count = $self->app->users->delete( uid => $uid );
+
+ printf( "Deleted %s -- %d tokens, %d monthly stats, %d journeys\n",
+ $name, $count->{tokens}, $count->{stats}, $count->{journeys} );
+
+ return;
+}
+
+sub run {
+ my ( $self, $command, @args ) = @_;
+
+ if ( $command eq 'add' ) {
+ $self->add_user(@args);
+ }
+ elsif ( $command eq 'delete' ) {
+ $self->delete_user(@args);
+ }
+ elsif ( $command eq 'DELETE' ) {
+ $self->really_delete_user(@args);
+ }
+ else {
+ $self->help;
+ }
+}
+
+1;
+
+__END__
+
+=head1 SYNOPSIS
+
+ Usage: index.pl account add [name] [email]
+
+ Adds user [name] with a temporary password, which is shown on stdout.
+ Users can change the password once logged in.
+
+ Usage: index.pl account delete [uid]
+
+ Request deletion of user [uid]. This has the same effect as using the
+ account deletion button. The user account and all corresponding data will
+ be deleted by a maintenance run after three days.
+
+ Usage: index.pl account DELETE [uid] [name]
+
+ Immediately delete user [uid]/[name] and all associated data. Deletion is
+ irrevocable and only performed if [name] matches the name of [uid].
diff --git a/lib/Travelynx/Command/database.pm b/lib/Travelynx/Command/database.pm
index cb5ffec..95d67f5 100644
--- a/lib/Travelynx/Command/database.pm
+++ b/lib/Travelynx/Command/database.pm
@@ -1,21 +1,31 @@
package Travelynx::Command::database;
+
+# Copyright (C) 2020-2023 Birte Kristina Friesel
+# Copyright (C) 2025 networkException <git@nwex.de>
+#
+# SPDX-License-Identifier: AGPL-3.0-or-later
use Mojo::Base 'Mojolicious::Command';
use DateTime;
+use File::Slurp qw(read_file);
+use List::Util qw();
+use JSON;
+use Travel::Status::DE::EFA;
+use Travel::Status::DE::HAFAS;
use Travel::Status::DE::IRIS::Stations;
+use Travel::Status::MOTIS;
has description => 'Initialize or upgrade database layout';
has usage => sub { shift->extract_usage };
sub get_schema_version {
- my ($db) = @_;
+ my ( $db, $key ) = @_;
my $version;
- eval {
- $version
- = $db->select( 'schema_version', ['version'] )->hash->{version};
- };
+ $key //= 'version';
+
+ eval { $version = $db->select( 'schema_version', [$key] )->hash->{$key}; };
if ($@) {
# If it failed, the version table does not exist -> run setup first.
@@ -941,8 +951,2546 @@ my @migrations = (
}
);
},
+
+ # v19 -> v20
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ create table polylines (
+ id serial not null primary key,
+ origin_eva integer not null,
+ destination_eva integer not null,
+ polyline jsonb not null
+ );
+ alter table journeys
+ add column polyline_id integer references polylines (id);
+ alter table in_transit
+ add column polyline_id integer references polylines (id);
+ drop view journeys_str;
+ drop view in_transit_str;
+ create view journeys_str as select
+ journeys.id as journey_id, user_id,
+ train_type, train_line, train_no, train_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ polylines.polyline as polyline,
+ cancelled, edited, route, messages, user_data,
+ dep_platform, arr_platform
+ from journeys
+ left join polylines on polylines.id = polyline_id
+ ;
+ create or replace view in_transit_str as select
+ user_id,
+ train_type, train_line, train_no, train_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ polylines.polyline as polyline,
+ cancelled, route, messages, user_data,
+ dep_platform, arr_platform, data
+ from in_transit
+ left join polylines on polylines.id = polyline_id
+ ;
+ update schema_version set version = 20;
+ }
+ );
+ },
+
+ # v20 -> v21
+ # After introducing polyline support, journey distance calculation diverged:
+ # the detail view (individual train) used the polyline, whereas monthly and
+ # yearly statistics were still based on beelines between intermediate stops.
+ # Release 1.16.0 fixes this -> ensure all caches are rebuilt.
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ truncate journey_stats;
+ update schema_version set version = 21;
+ }
+ );
+ },
+
+ # v21 -> v22
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ create table traewelling (
+ user_id integer not null references users (id) primary key,
+ email varchar(256) not null,
+ push_sync boolean not null,
+ pull_sync boolean not null,
+ errored boolean,
+ token text,
+ data jsonb,
+ latest_run timestamptz
+ );
+ comment on table traewelling is 'Token and Status for Traewelling';
+ create view traewelling_str as select
+ user_id, email, push_sync, pull_sync, errored, token, data,
+ extract(epoch from latest_run) as latest_run_ts
+ from traewelling
+ ;
+ update schema_version set version = 22;
+ }
+ );
+ },
+
+ # v22 -> v23
+ # 1.18.1 fixes handling of negative cumulative arrival/departure delays
+ # and introduces additional statistics entries with pre-formatted duration
+ # strings while at it. Old cache entries lack those.
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ truncate journey_stats;
+ update schema_version set version = 23;
+ }
+ );
+ },
+
+ # v23 -> v24
+ # travelynx 1.22 warns about upcoming account deletion due to inactivity
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ alter table users add column deletion_notified timestamptz;
+ comment on column users.deletion_notified is 'Time at which warning about upcoming account deletion due to inactivity was sent';
+ update schema_version set version = 24;
+ }
+ );
+ },
+
+ # v24 -> v25
+ # travelynx 1.23 adds optional links to external services, e.g.
+ # DBF or bahn.expert departure boards
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ alter table users add column external_services smallint;
+ comment on column users.external_services is 'Which external service to use for stationboard or routing links';
+ update schema_version set version = 25;
+ }
+ );
+ },
+
+ # v25 -> v26
+ # travelynx 1.24 adds local transit connections and needs to know targets
+ # for that to work, as local transit does not support checkins yet.
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ create table localtransit (
+ user_id integer not null references users (id) primary key,
+ data jsonb
+ );
+ create view user_transit as select
+ id,
+ use_history,
+ localtransit.data as data
+ from users
+ left join localtransit on localtransit.user_id = id
+ ;
+ update schema_version set version = 26;
+ }
+ );
+ },
+
+ # v26 -> v27
+ # add list of stations that are not (or no longer) present in T-S-DE-IRIS
+ # (in this case, stations that were removed up to 1.74)
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ alter table schema_version
+ add column iris varchar(12);
+ create table stations (
+ eva int not null primary key,
+ ds100 varchar(16) not null,
+ name varchar(64) not null,
+ lat real not null,
+ lon real not null,
+ source smallint not null,
+ archived bool not null
+ );
+ update schema_version set version = 27;
+ update schema_version set iris = '0';
+ }
+ );
+ },
+
+ # v27 -> v28
+ # add ds100, name, and lat/lon from stations table to journeys_str / in_transit_str
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ drop view journeys_str;
+ drop view in_transit_str;
+ create view journeys_str as select
+ journeys.id as journey_id, user_id,
+ train_type, train_line, train_no, train_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ dep_station.ds100 as dep_ds100,
+ dep_station.name as dep_name,
+ dep_station.lat as dep_lat,
+ dep_station.lon as dep_lon,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ arr_station.ds100 as arr_ds100,
+ arr_station.name as arr_name,
+ arr_station.lat as arr_lat,
+ arr_station.lon as arr_lon,
+ polylines.polyline as polyline,
+ cancelled, edited, route, messages, user_data,
+ dep_platform, arr_platform
+ from journeys
+ left join polylines on polylines.id = polyline_id
+ left join stations as dep_station on checkin_station_id = dep_station.eva
+ left join stations as arr_station on checkout_station_id = arr_station.eva
+ ;
+ create view in_transit_str as select
+ user_id,
+ train_type, train_line, train_no, train_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ dep_station.ds100 as dep_ds100,
+ dep_station.name as dep_name,
+ dep_station.lat as dep_lat,
+ dep_station.lon as dep_lon,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ arr_station.ds100 as arr_ds100,
+ arr_station.name as arr_name,
+ arr_station.lat as arr_lat,
+ arr_station.lon as arr_lon,
+ polylines.polyline as polyline,
+ cancelled, route, messages, user_data,
+ dep_platform, arr_platform, data
+ from in_transit
+ left join polylines on polylines.id = polyline_id
+ left join stations as dep_station on checkin_station_id = dep_station.eva
+ left join stations as arr_station on checkout_station_id = arr_station.eva
+ ;
+ update schema_version set version = 28;
+ }
+ );
+ },
+
+ # v28 -> v29
+ # add pre-migration travelynx version. This way, a failed migration can
+ # print a helpful "git checkout" command.
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ alter table schema_version
+ add column travelynx varchar(64);
+ update schema_version set version = 29;
+ }
+ );
+ },
+
+ # v29 -> v30
+ # change layout of stops in in_transit and journeys "route" lists.
+ # Old layout: A mixture of [name, {data}, undef/"additional"/"cancelled"], [name, timestamp, timestamp], and [name]
+ # New layout: [name, eva, {data including isAdditional/isCancelled}]
+ # Combined with a maintenance task that adds eva IDs to past stops, this will allow for more resilience against station name changes.
+ # It will also help improve the performance of distance and map calculations.
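+ # For illustration only (stop name and data keys are hypothetical; the EVA ID
+ # stays undef here and is backfilled later by the maintenance task):
+ #   old: [ 'Essen Hbf', { sched_arr => ..., rt_arr => ... }, 'cancelled' ]
+ #   new: [ 'Essen Hbf', undef, { sched_arr => ..., rt_arr => ..., isCancelled => 1 } ]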
+ sub {
+ my ($db) = @_;
+ my $json = JSON->new;
+
+ say 'Adjusting route schema, this may take a while ...';
+
+ my $res = $db->select( 'in_transit_str', [ 'route', 'user_id' ] );
+ while ( my $row = $res->expand->hash ) {
+ my @new_route;
+ for my $stop ( @{ $row->{route} } ) {
+ push( @new_route, [ $stop->[0], undef, {} ] );
+ }
+ $db->update(
+ 'in_transit',
+ { route => $json->encode( \@new_route ) },
+ { user_id => $row->{user_id} }
+ );
+ }
+
+ my $total
+ = $db->select( 'journeys', 'count(*) as count' )->hash->{count};
+ my $count = 0;
+
+ $res = $db->select( 'journeys_str', [ 'route', 'journey_id' ] );
+ while ( my $row = $res->expand->hash ) {
+ my @new_route;
+
+ for my $stop ( @{ $row->{route} } ) {
+ if ( @{$stop} == 1 ) {
+ push( @new_route, [ $stop->[0], undef, {} ] );
+ }
+ elsif (
+ ( not defined $stop->[1] or $stop->[1] =~ m{ ^ \d+ $ }x )
+ and
+ ( not defined $stop->[2] or $stop->[2] =~ m{ ^ \d+ $ }x )
+ )
+ {
+ push( @new_route, [ $stop->[0], undef, {} ] );
+ }
+ else {
+ my $attr = $stop->[1] // {};
+ if ( $stop->[2] and $stop->[2] eq 'additional' ) {
+ $attr->{isAdditional} = 1;
+ }
+ elsif ( $stop->[2] and $stop->[2] eq 'cancelled' ) {
+ $attr->{isCancelled} = 1;
+ }
+ push( @new_route, [ $stop->[0], undef, $attr ] );
+ }
+ }
+
+ $db->update(
+ 'journeys',
+ { route => $json->encode( \@new_route ) },
+ { id => $row->{journey_id} }
+ );
+
+ if ( $count++ % 10000 == 0 ) {
+ printf( " %2.0f%% complete\n", $count * 100 / $total );
+ }
+ }
+ say ' done';
+ $db->query(
+ qq{
+ update schema_version set version = 30;
+ }
+ );
+ },
+
+ # v30 -> v31
+ # travelynx v1.29.17 introduces links to conflicting journeys.
+ # These require changes to statistics data.
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ truncate journey_stats;
+ update schema_version set version = 31;
+ }
+ );
+ },
+
+ # v31 -> v32
+ # travelynx v1.29.18 improves above-mentioned conflict links.
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ truncate journey_stats;
+ update schema_version set version = 32;
+ }
+ );
+ },
+
+ # v32 -> v33
+ # add optional per-status visibility that overrides global visibility
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ alter table journeys add column visibility smallint;
+ alter table in_transit add column visibility smallint;
+ drop view journeys_str;
+ drop view in_transit_str;
+ create view journeys_str as select
+ journeys.id as journey_id, user_id,
+ train_type, train_line, train_no, train_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ dep_station.ds100 as dep_ds100,
+ dep_station.name as dep_name,
+ dep_station.lat as dep_lat,
+ dep_station.lon as dep_lon,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ arr_station.ds100 as arr_ds100,
+ arr_station.name as arr_name,
+ arr_station.lat as arr_lat,
+ arr_station.lon as arr_lon,
+ polylines.polyline as polyline,
+ visibility,
+ cancelled, edited, route, messages, user_data,
+ dep_platform, arr_platform
+ from journeys
+ left join polylines on polylines.id = polyline_id
+ left join stations as dep_station on checkin_station_id = dep_station.eva
+ left join stations as arr_station on checkout_station_id = arr_station.eva
+ ;
+ create view in_transit_str as select
+ user_id,
+ train_type, train_line, train_no, train_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ dep_station.ds100 as dep_ds100,
+ dep_station.name as dep_name,
+ dep_station.lat as dep_lat,
+ dep_station.lon as dep_lon,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ arr_station.ds100 as arr_ds100,
+ arr_station.name as arr_name,
+ arr_station.lat as arr_lat,
+ arr_station.lon as arr_lon,
+ polylines.polyline as polyline,
+ visibility,
+ cancelled, route, messages, user_data,
+ dep_platform, arr_platform, data
+ from in_transit
+ left join polylines on polylines.id = polyline_id
+ left join stations as dep_station on checkin_station_id = dep_station.eva
+ left join stations as arr_station on checkout_station_id = arr_station.eva
+ ;
+ }
+ );
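+ # Example of the old -> new public_level mapping (hypothetical value):
+ # old 0x06 (status public, comments public) becomes 100 | 0x80 = 228.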
+ my $res = $db->select( 'users', [ 'id', 'public_level' ] );
+ while ( my $row = $res->hash ) {
+ my $old_level = $row->{public_level};
+
+ # status default: unlisted
+ my $new_level = 30;
+ if ( $old_level & 0x01 ) {
+
+ # status: account required
+ $new_level = 80;
+ }
+ if ( $old_level & 0x02 ) {
+
+ # status: public
+ $new_level = 100;
+ }
+ if ( $old_level & 0x04 ) {
+
+ # comment public
+ $new_level |= 0x80;
+ }
+ if ( $old_level & 0x10 ) {
+
+ # past: account required
+ $new_level |= 0x100;
+ }
+ if ( $old_level & 0x20 ) {
+
+ # past: public
+ $new_level |= 0x200;
+ }
+ if ( $old_level & 0x40 ) {
+
+ # past: infinite (default is 4 weeks)
+ $new_level |= 0x400;
+ }
+ my $r = $db->update(
+ 'users',
+ { public_level => $new_level },
+ { id => $row->{id} }
+ )->rows;
+ if ( $r != 1 ) {
+ die("oh no");
+ }
+ }
+ $db->update( 'schema_version', { version => 33 } );
+ },
+
+ # v33 -> v34
+ # add polyline_id to in_transit_str
+ # (https://github.com/derf/travelynx/issues/66)
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ drop view in_transit_str;
+ create view in_transit_str as select
+ user_id,
+ train_type, train_line, train_no, train_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ dep_station.ds100 as dep_ds100,
+ dep_station.name as dep_name,
+ dep_station.lat as dep_lat,
+ dep_station.lon as dep_lon,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ arr_station.ds100 as arr_ds100,
+ arr_station.name as arr_name,
+ arr_station.lat as arr_lat,
+ arr_station.lon as arr_lon,
+ polyline_id,
+ polylines.polyline as polyline,
+ visibility,
+ cancelled, route, messages, user_data,
+ dep_platform, arr_platform, data
+ from in_transit
+ left join polylines on polylines.id = polyline_id
+ left join stations as dep_station on checkin_station_id = dep_station.eva
+ left join stations as arr_station on checkout_station_id = arr_station.eva
+ ;
+ update schema_version set version = 34;
+ }
+ );
+ },
+
+ # v34 -> v35
+ sub {
+ my ($db) = @_;
+
+ # 1 : follows
+ # 2 : follow requested
+ # 3 : is blocked by
+ $db->query(
+ qq{
+ create table relations (
+ subject_id integer not null references users (id),
+ predicate smallint not null,
+ object_id integer not null references users (id),
+ primary key (subject_id, object_id)
+ );
+ create view followers as select
+ relations.object_id as self_id,
+ users.id as id,
+ users.name as name
+ from relations
+ join users on relations.subject_id = users.id
+ where predicate = 1;
+ create view followees as select
+ relations.subject_id as self_id,
+ users.id as id,
+ users.name as name
+ from relations
+ join users on relations.object_id = users.id
+ where predicate = 1;
+ create view follow_requests as select
+ relations.object_id as self_id,
+ users.id as id,
+ users.name as name
+ from relations
+ join users on relations.subject_id = users.id
+ where predicate = 2;
+ create view blocked_users as select
+ relations.object_id as self_id,
+ users.id as id,
+ users.name as name
+ from relations
+ join users on relations.subject_id = users.id
+ where predicate = 3;
+ update schema_version set version = 35;
+ }
+ );
+ },
+
+ # v35 -> v36
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ alter table relations
+ add column ts timestamptz not null;
+ alter table users
+ add column accept_follows smallint default 0;
+ update schema_version set version = 36;
+ }
+ );
+ },
+
+ # v36 -> v37
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ alter table users
+ add column notifications smallint default 0,
+ add column profile jsonb;
+ update schema_version set version = 37;
+ }
+ );
+ },
+
+ # v37 -> v38
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ drop view followers;
+ create view followers as select
+ relations.object_id as self_id,
+ users.id as id,
+ users.name as name,
+ users.accept_follows as accept_follows,
+ r2.predicate as inverse_predicate
+ from relations
+ join users on relations.subject_id = users.id
+ left join relations as r2 on relations.subject_id = r2.object_id
+ where relations.predicate = 1;
+ update schema_version set version = 38;
+ }
+ );
+ },
+
+ # v38 -> v39
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ drop view followers;
+ create view followers as select
+ relations.object_id as self_id,
+ users.id as id,
+ users.name as name,
+ users.accept_follows as accept_follows,
+ r2.predicate as inverse_predicate
+ from relations
+ join users on relations.subject_id = users.id
+ left join relations as r2
+ on relations.subject_id = r2.object_id
+ and relations.object_id = r2.subject_id
+ where relations.predicate = 1;
+ update schema_version set version = 39;
+ }
+ );
+ },
+
+ # v39 -> v40
+ # distinguish between public / travelynx / followers / private visibility
+ # for the history page, just like status visibility.
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ alter table users alter public_level type integer;
+ }
+ );
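+ # Example of the mapping (hypothetical value): old 100 | 0x200 | 0x400
+ # (status public, past journeys public, past range infinite) becomes
+ # 100 | (100 << 8) | 0x10000 = 91236.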
+ my $res = $db->select( 'users', [ 'id', 'public_level' ] );
+ while ( my $row = $res->hash ) {
+ my $old_level = $row->{public_level};
+
+ # checkin and comment visibility remain unchanged
+ my $new_level = $old_level & 0x00ff;
+
+ # past: account required
+ if ( $old_level & 0x100 ) {
+ $new_level |= 80 << 8;
+ }
+
+ # past: public
+ elsif ( $old_level & 0x200 ) {
+ $new_level |= 100 << 8;
+ }
+
+ # past: private
+ else {
+ $new_level |= 10 << 8;
+ }
+
+ # past: infinite (default is 4 weeks)
+ if ( $old_level & 0x400 ) {
+ $new_level |= 0x10000;
+ }
+
+ # show past journey on status page
+ if ( $old_level & 0x800 ) {
+ $new_level |= 0x8000;
+ }
+
+ my $r = $db->update(
+ 'users',
+ { public_level => $new_level },
+ { id => $row->{id} }
+ )->rows;
+ if ( $r != 1 ) {
+ die("oh no");
+ }
+ }
+ $db->update( 'schema_version', { version => 40 } );
+ },
+
+ # v40 -> v41
+ # Compute effective visibility in in_transit_str and journeys_str.
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ drop view in_transit_str;
+ drop view journeys_str;
+ create view in_transit_str as select
+ user_id,
+ train_type, train_line, train_no, train_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ dep_station.ds100 as dep_ds100,
+ dep_station.name as dep_name,
+ dep_station.lat as dep_lat,
+ dep_station.lon as dep_lon,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ arr_station.ds100 as arr_ds100,
+ arr_station.name as arr_name,
+ arr_station.lat as arr_lat,
+ arr_station.lon as arr_lon,
+ polyline_id,
+ polylines.polyline as polyline,
+ visibility,
+ coalesce(visibility, users.public_level & 127) as effective_visibility,
+ cancelled, route, messages, user_data,
+ dep_platform, arr_platform, data
+ from in_transit
+ left join polylines on polylines.id = polyline_id
+ left join users on users.id = user_id
+ left join stations as dep_station on checkin_station_id = dep_station.eva
+ left join stations as arr_station on checkout_station_id = arr_station.eva
+ ;
+ create view journeys_str as select
+ journeys.id as journey_id, user_id,
+ train_type, train_line, train_no, train_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ dep_station.ds100 as dep_ds100,
+ dep_station.name as dep_name,
+ dep_station.lat as dep_lat,
+ dep_station.lon as dep_lon,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ arr_station.ds100 as arr_ds100,
+ arr_station.name as arr_name,
+ arr_station.lat as arr_lat,
+ arr_station.lon as arr_lon,
+ polylines.polyline as polyline,
+ visibility,
+ coalesce(visibility, users.public_level & 127) as effective_visibility,
+ cancelled, edited, route, messages, user_data,
+ dep_platform, arr_platform
+ from journeys
+ left join polylines on polylines.id = polyline_id
+ left join users on users.id = user_id
+ left join stations as dep_station on checkin_station_id = dep_station.eva
+ left join stations as arr_station on checkout_station_id = arr_station.eva
+ ;
+ update schema_version set version = 41;
+ }
+ );
+ },
+
+ # v41 -> v42
+ # adds current followee checkins
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ create view follows_in_transit as select
+ r1.subject_id as follower_id, user_id as followee_id,
+ users.name as followee_name,
+ train_type, train_line, train_no, train_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ dep_station.ds100 as dep_ds100,
+ dep_station.name as dep_name,
+ dep_station.lat as dep_lat,
+ dep_station.lon as dep_lon,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ arr_station.ds100 as arr_ds100,
+ arr_station.name as arr_name,
+ arr_station.lat as arr_lat,
+ arr_station.lon as arr_lon,
+ polyline_id,
+ polylines.polyline as polyline,
+ visibility,
+ coalesce(visibility, users.public_level & 127) as effective_visibility,
+ cancelled, route, messages, user_data,
+ dep_platform, arr_platform, data
+ from in_transit
+ left join polylines on polylines.id = polyline_id
+ left join users on users.id = user_id
+ left join relations as r1 on r1.predicate = 1 and r1.object_id = user_id
+ left join stations as dep_station on checkin_station_id = dep_station.eva
+ left join stations as arr_station on checkout_station_id = arr_station.eva
+ ;
+ update schema_version set version = 42;
+ }
+ );
+ },
+
+ # v42 -> v43
+ # list sent and received follow requests
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ alter view follow_requests rename to rx_follow_requests;
+ create view tx_follow_requests as select
+ relations.subject_id as self_id,
+ users.id as id,
+ users.name as name
+ from relations
+ join users on relations.object_id = users.id
+ where predicate = 2;
+ update schema_version set version = 43;
+ }
+ );
+ },
+
+ # v43 -> v44
+ # show inverse relation in followees as well
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ drop view followees;
+ create view followees as select
+ relations.subject_id as self_id,
+ users.id as id,
+ users.name as name,
+ r2.predicate as inverse_predicate
+ from relations
+ join users on relations.object_id = users.id
+ left join relations as r2
+ on relations.subject_id = r2.object_id
+ and relations.object_id = r2.subject_id
+ where relations.predicate = 1;
+ update schema_version set version = 44;
+ }
+ );
+ },
+
+ # v44 -> v45
+ # prepare for HAFAS support: many HAFAS stations do not have DS100 codes
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ alter table stations alter column ds100 drop not null;
+ update schema_version set version = 45;
+ }
+ );
+ },
+
+ # v45 -> v46
+ # Switch to Traewelling OAuth2 authentication.
+ # E-Mail is no longer needed.
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ drop view traewelling_str;
+ create view traewelling_str as select
+ user_id, push_sync, pull_sync, errored, token, data,
+ extract(epoch from latest_run) as latest_run_ts
+ from traewelling
+ ;
+ alter table traewelling drop column email;
+ update schema_version set version = 46;
+ }
+ );
+ },
+
+ # v46 -> v47
+ # sort followee checkins by checkin time
+ # (descending / most recent first, like a timeline)
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ drop view follows_in_transit;
+ create view follows_in_transit as select
+ r1.subject_id as follower_id, user_id as followee_id,
+ users.name as followee_name,
+ train_type, train_line, train_no, train_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ dep_station.ds100 as dep_ds100,
+ dep_station.name as dep_name,
+ dep_station.lat as dep_lat,
+ dep_station.lon as dep_lon,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ arr_station.ds100 as arr_ds100,
+ arr_station.name as arr_name,
+ arr_station.lat as arr_lat,
+ arr_station.lon as arr_lon,
+ polyline_id,
+ polylines.polyline as polyline,
+ visibility,
+ coalesce(visibility, users.public_level & 127) as effective_visibility,
+ cancelled, route, messages, user_data,
+ dep_platform, arr_platform, data
+ from in_transit
+ left join polylines on polylines.id = polyline_id
+ left join users on users.id = user_id
+ left join relations as r1 on r1.predicate = 1 and r1.object_id = user_id
+ left join stations as dep_station on checkin_station_id = dep_station.eva
+ left join stations as arr_station on checkout_station_id = arr_station.eva
+ order by checkin_time desc
+ ;
+ update schema_version set version = 47;
+ }
+ );
+ },
+
+ # v47 -> v48
+ # Store Traewelling refresh tokens; store expiry as explicit column.
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ alter table traewelling
+ add column refresh_token text,
+ add column expiry timestamptz;
+ drop view traewelling_str;
+ create view traewelling_str as select
+ user_id, push_sync, pull_sync, errored,
+ token, refresh_token, data,
+ extract(epoch from latest_run) as latest_run_ts,
+ extract(epoch from expiry) as expiry_ts
+ from traewelling
+ ;
+ update schema_version set version = 48;
+ }
+ );
+ },
+
+ # v48 -> v49
+ # create indexes for common queries
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ create index uid_real_departure_idx on journeys (user_id, real_departure);
+ update schema_version set version = 49;
+ }
+ );
+ },
+
+ # v49 -> v50
+ # travelynx 2.0 introduced proper HAFAS support, so there is no need for
+ # the 'FYI, here is some HAFAS data' kludge anymore.
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ drop view user_transit;
+ drop table localtransit;
+ update schema_version set version = 50;
+ }
+ );
+ },
+
+ # v50 -> v51
+ # store related HAFAS stations
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ create table related_stations (
+ eva integer not null,
+ meta integer not null,
+ unique (eva, meta)
+ );
+ create index rel_eva on related_stations (eva);
+ update schema_version set version = 51;
+ }
+ );
+ },
+
+ # v51 -> v52
+ # Explicitly encode backend type; preparation for multiple HAFAS backends
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ create table backends (
+ id smallserial not null primary key,
+ iris bool not null,
+ hafas bool not null,
+ efa bool not null,
+ ris bool not null,
+ name varchar(32) not null,
+ unique (iris, hafas, efa, ris, name)
+ );
+ insert into backends (id, iris, hafas, efa, ris, name) values (0, true, false, false, false, '');
+ insert into backends (id, iris, hafas, efa, ris, name) values (1, false, true, false, false, 'DB');
+ alter sequence backends_id_seq restart with 2;
+ alter table in_transit add column backend_id smallint references backends (id);
+ alter table journeys add column backend_id smallint references backends (id);
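+ -- Note: train_id values containing '|' stem from DB HAFAS, plain IRIS
+ -- train IDs do not contain '|'. Manually logged journeys ('manual')
+ -- are assigned to the DB backend below.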
+ update in_transit set backend_id = 0 where train_id not like '%|%';
+ update journeys set backend_id = 0 where train_id not like '%|%';
+ update in_transit set backend_id = 1 where train_id like '%|%';
+ update journeys set backend_id = 1 where train_id like '%|%';
+ update journeys set backend_id = 1 where train_id = 'manual';
+ alter table in_transit alter column backend_id set not null;
+ alter table journeys alter column backend_id set not null;
+
+ drop view in_transit_str;
+ drop view journeys_str;
+ create view in_transit_str as select
+ user_id,
+ backend.iris as is_iris, backend.hafas as is_hafas,
+ backend.efa as is_efa, backend.ris as is_ris,
+ backend.name as backend_name,
+ train_type, train_line, train_no, train_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ dep_station.ds100 as dep_ds100,
+ dep_station.name as dep_name,
+ dep_station.lat as dep_lat,
+ dep_station.lon as dep_lon,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ arr_station.ds100 as arr_ds100,
+ arr_station.name as arr_name,
+ arr_station.lat as arr_lat,
+ arr_station.lon as arr_lon,
+ polyline_id,
+ polylines.polyline as polyline,
+ visibility,
+ coalesce(visibility, users.public_level & 127) as effective_visibility,
+ cancelled, route, messages, user_data,
+ dep_platform, arr_platform, data
+ from in_transit
+ left join polylines on polylines.id = polyline_id
+ left join users on users.id = user_id
+ left join stations as dep_station on checkin_station_id = dep_station.eva
+ left join stations as arr_station on checkout_station_id = arr_station.eva
+ left join backends as backend on backend_id = backend.id
+ ;
+ create view journeys_str as select
+ journeys.id as journey_id, user_id,
+ backend.iris as is_iris, backend.hafas as is_hafas,
+ backend.efa as is_efa, backend.ris as is_ris,
+ backend.name as backend_name,
+ train_type, train_line, train_no, train_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ dep_station.ds100 as dep_ds100,
+ dep_station.name as dep_name,
+ dep_station.lat as dep_lat,
+ dep_station.lon as dep_lon,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ arr_station.ds100 as arr_ds100,
+ arr_station.name as arr_name,
+ arr_station.lat as arr_lat,
+ arr_station.lon as arr_lon,
+ polylines.polyline as polyline,
+ visibility,
+ coalesce(visibility, users.public_level & 127) as effective_visibility,
+ cancelled, edited, route, messages, user_data,
+ dep_platform, arr_platform
+ from journeys
+ left join polylines on polylines.id = polyline_id
+ left join users on users.id = user_id
+ left join stations as dep_station on checkin_station_id = dep_station.eva
+ left join stations as arr_station on checkout_station_id = arr_station.eva
+ left join backends as backend on backend_id = backend.id
+ ;
+ update schema_version set version = 52;
+ }
+ );
+ },
+
+ # v52 -> v53
+ # Extend train_id to be compatible with more recent HAFAS versions
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ drop view in_transit_str;
+ drop view journeys_str;
+ drop view follows_in_transit;
+ alter table in_transit alter column train_id type varchar(384);
+ alter table journeys alter column train_id type varchar(384);
+ create view in_transit_str as select
+ user_id,
+ backend.iris as is_iris, backend.hafas as is_hafas,
+ backend.efa as is_efa, backend.ris as is_ris,
+ backend.name as backend_name,
+ train_type, train_line, train_no, train_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ dep_station.ds100 as dep_ds100,
+ dep_station.name as dep_name,
+ dep_station.lat as dep_lat,
+ dep_station.lon as dep_lon,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ arr_station.ds100 as arr_ds100,
+ arr_station.name as arr_name,
+ arr_station.lat as arr_lat,
+ arr_station.lon as arr_lon,
+ polyline_id,
+ polylines.polyline as polyline,
+ visibility,
+ coalesce(visibility, users.public_level & 127) as effective_visibility,
+ cancelled, route, messages, user_data,
+ dep_platform, arr_platform, data
+ from in_transit
+ left join polylines on polylines.id = polyline_id
+ left join users on users.id = user_id
+ left join stations as dep_station on checkin_station_id = dep_station.eva
+ left join stations as arr_station on checkout_station_id = arr_station.eva
+ left join backends as backend on backend_id = backend.id
+ ;
+ create view journeys_str as select
+ journeys.id as journey_id, user_id,
+ backend.iris as is_iris, backend.hafas as is_hafas,
+ backend.efa as is_efa, backend.ris as is_ris,
+ backend.name as backend_name,
+ train_type, train_line, train_no, train_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ dep_station.ds100 as dep_ds100,
+ dep_station.name as dep_name,
+ dep_station.lat as dep_lat,
+ dep_station.lon as dep_lon,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ arr_station.ds100 as arr_ds100,
+ arr_station.name as arr_name,
+ arr_station.lat as arr_lat,
+ arr_station.lon as arr_lon,
+ polylines.polyline as polyline,
+ visibility,
+ coalesce(visibility, users.public_level & 127) as effective_visibility,
+ cancelled, edited, route, messages, user_data,
+ dep_platform, arr_platform
+ from journeys
+ left join polylines on polylines.id = polyline_id
+ left join users on users.id = user_id
+ left join stations as dep_station on checkin_station_id = dep_station.eva
+ left join stations as arr_station on checkout_station_id = arr_station.eva
+ left join backends as backend on backend_id = backend.id
+ ;
+ create view follows_in_transit as select
+ r1.subject_id as follower_id, user_id as followee_id,
+ users.name as followee_name,
+ train_type, train_line, train_no, train_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ dep_station.ds100 as dep_ds100,
+ dep_station.name as dep_name,
+ dep_station.lat as dep_lat,
+ dep_station.lon as dep_lon,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ arr_station.ds100 as arr_ds100,
+ arr_station.name as arr_name,
+ arr_station.lat as arr_lat,
+ arr_station.lon as arr_lon,
+ polyline_id,
+ polylines.polyline as polyline,
+ visibility,
+ coalesce(visibility, users.public_level & 127) as effective_visibility,
+ cancelled, route, messages, user_data,
+ dep_platform, arr_platform, data
+ from in_transit
+ left join polylines on polylines.id = polyline_id
+ left join users on users.id = user_id
+ left join relations as r1 on r1.predicate = 1 and r1.object_id = user_id
+ left join stations as dep_station on checkin_station_id = dep_station.eva
+ left join stations as arr_station on checkout_station_id = arr_station.eva
+ order by checkin_time desc
+ ;
+ update schema_version set version = 53;
+ }
+ );
+ },
+
+ # v53 -> v54
+ # Retrofit lat/lon data onto routes logged before v2.7.8; ensure
+ # consistent name and eva entries as well.
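+ # For illustration only (EVA ID and coordinates are approximate examples):
+ #   before: [ 'Essen Hbf', undef, {} ]
+ #   after:  [ 'Essen Hbf', 8000098, { lat => 51.4508, lon => 7.0131 } ]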
+ sub {
+ my ($db) = @_;
+
+ say
+'Adding lat/lon to routes of journeys logged before v2.7.8 and improving consistency of name/eva data in very old route entries.';
+ say 'This may take a while ...';
+
+ my %legacy_to_new;
+ if ( -r 'share/old_station_names.json' ) {
+ %legacy_to_new = %{ JSON->new->utf8->decode(
+ scalar read_file('share/old_station_names.json')
+ )
+ };
+ }
+
+ my %latlon_by_eva;
+ my %latlon_by_name;
+ my $res = $db->select( 'stations', [ 'name', 'eva', 'lat', 'lon' ] );
+ while ( my $row = $res->hash ) {
+ $latlon_by_eva{ $row->{eva} } = $row;
+ $latlon_by_name{ $row->{name} } = $row;
+ }
+
+ my $total
+ = $db->select( 'journeys', 'count(*) as count' )->hash->{count};
+ my $count = 0;
+ my $total_no_eva = 0;
+ my $total_no_latlon = 0;
+
+ my $json = JSON->new;
+
+ $res = $db->select( 'journeys_str', [ 'route', 'journey_id' ] );
+ while ( my $row = $res->expand->hash ) {
+ my $no_eva = 0;
+ my $no_latlon = 0;
+ my $changed = 0;
+ my @route = @{ $row->{route} };
+ for my $stop (@route) {
+ my $name = $stop->[0];
+ my $eva = $stop->[1];
+
+ if ( not $eva and $stop->[2]{eva} ) {
+ $eva = $stop->[1] = 0 + $stop->[2]{eva};
+ }
+
+ if ( $stop->[2]{eva} and $eva and $eva == $stop->[2]{eva} ) {
+ delete $stop->[2]{eva};
+ }
+
+ if ( $stop->[2]{name} and $name eq $stop->[2]{name} ) {
+ delete $stop->[2]{name};
+ }
+
+ if ( not $eva ) {
+ if ( $latlon_by_name{$name} ) {
+ $eva = $stop->[1] = $latlon_by_name{$name}{eva};
+ $changed = 1;
+ }
+ elsif ( $legacy_to_new{$name}
+ and $latlon_by_name{ $legacy_to_new{$name} } )
+ {
+ $eva = $stop->[1]
+ = $latlon_by_name{ $legacy_to_new{$name} }{eva};
+ $stop->[2]{lat}
+ = $latlon_by_name{ $legacy_to_new{$name} }{lat};
+ $stop->[2]{lon}
+ = $latlon_by_name{ $legacy_to_new{$name} }{lon};
+ $changed = 1;
+ }
+ else {
+ $no_eva = 1;
+ }
+ }
+
+ if ( $stop->[2]{lat} and $stop->[2]{lon} ) {
+ next;
+ }
+
+ if ( $eva and $latlon_by_eva{$eva} ) {
+ $stop->[2]{lat} = $latlon_by_eva{$eva}{lat};
+ $stop->[2]{lon} = $latlon_by_eva{$eva}{lon};
+ $changed = 1;
+ }
+ elsif ( $latlon_by_name{$name} ) {
+ $stop->[2]{lat} = $latlon_by_name{$name}{lat};
+ $stop->[2]{lon} = $latlon_by_name{$name}{lon};
+ $changed = 1;
+ }
+ elsif ( $legacy_to_new{$name}
+ and $latlon_by_name{ $legacy_to_new{$name} } )
+ {
+ $stop->[2]{lat}
+ = $latlon_by_name{ $legacy_to_new{$name} }{lat};
+ $stop->[2]{lon}
+ = $latlon_by_name{ $legacy_to_new{$name} }{lon};
+ $changed = 1;
+ }
+ else {
+ $no_latlon = 1;
+ }
+ }
+ if ($no_eva) {
+ $total_no_eva += 1;
+ }
+ if ($no_latlon) {
+ $total_no_latlon += 1;
+ }
+ if ($changed) {
+ $db->update(
+ 'journeys',
+ {
+ route => $json->encode( \@route ),
+ },
+ { id => $row->{journey_id} }
+ );
+ }
+ if ( $count++ % 10000 == 0 ) {
+ printf( " %2.0f%% complete\n", $count * 100 / $total );
+ }
+ }
+ say ' done';
+ if ($total_no_eva) {
+ printf( " (%d of %d routes still lack some EVA IDs)\n",
+ $total_no_eva, $total );
+ }
+ if ($total_no_latlon) {
+ printf( " (%d of %d routes still lack some lat/lon data)\n",
+ $total_no_latlon, $total );
+ }
+
+ $db->query(
+ qq{
+ update schema_version set version = 54;
+ }
+ );
+ },
+
+ # v54 -> v55
+ # do not share stations between backends
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ alter table schema_version add column hafas varchar(12);
+ alter table users drop column external_services;
+ alter table users add column backend_id smallint references backends (id) default 1;
+ alter table stations drop constraint stations_pkey;
+ alter table stations add unique (eva, source);
+ create index eva_by_source on stations (eva, source);
+ create index eva on stations (eva);
+ alter table related_stations drop constraint related_stations_eva_meta_key;
+ drop index rel_eva;
+ alter table related_stations add column backend_id smallint;
+ update related_stations set backend_id = 1;
+ alter table related_stations alter column backend_id set not null;
+ alter table related_stations add constraint backend_fk foreign key (backend_id) references backends (id);
+ alter table related_stations add unique (eva, meta, backend_id);
+ create index related_stations_eva_backend_key on related_stations (eva, backend_id);
+ }
+ );
+
+ # up until now, IRIS and DB HAFAS shared stations, with IRIS taking
+ # preference. As of v2.7, this is no longer the case. However, old DB
+ # HAFAS journeys may still reference IRIS-specific stations. So, we
+ # make all IRIS stations available as DB HAFAS stations as well.
+ my $total
+ = $db->select( 'stations', 'count(*) as count', { source => 0 } )
+ ->hash->{count};
+ my $count = 0;
+
+ # Caveat: If this is a fresh installation, there are no IRIS stations
+ # in the database yet. So we have to populate it first.
+ if ( not $total ) {
+ say
+'Preparing to untangle IRIS / HAFAS stations, this may take a while ...';
+ $total = scalar Travel::Status::DE::IRIS::Stations::get_stations();
+ for my $s ( Travel::Status::DE::IRIS::Stations::get_stations() ) {
+ my ( $ds100, $name, $eva, $lon, $lat ) = @{$s};
+ if ( $ENV{__TRAVELYNX_TEST_MINI_IRIS}
+ and ( $eva < 8000000 or $eva > 8000100 ) )
+ {
+ next;
+ }
+ $db->insert(
+ 'stations',
+ {
+ eva => $eva,
+ ds100 => $ds100,
+ name => $name,
+ lat => $lat,
+ lon => $lon,
+ source => 0,
+ archived => 0
+ },
+ );
+ if ( $count++ % 1000 == 0 ) {
+ printf( " %2.0f%% complete\n", $count * 100 / $total );
+ }
+ }
+ $count = 0;
+ }
+
+ say 'Untangling IRIS / HAFAS stations, this may take a while ...';
+ my $res = $db->query(
+ qq{
+ select eva, ds100, name, lat, lon, archived
+ from stations
+ where source = 0;
+ }
+ );
+ while ( my $row = $res->hash ) {
+ $db->insert(
+ 'stations',
+ {
+ eva => $row->{eva},
+ ds100 => $row->{ds100},
+ name => $row->{name},
+ lat => $row->{lat},
+ lon => $row->{lon},
+ archived => $row->{archived},
+ source => 1,
+ }
+ );
+ if ( $count++ % 1000 == 0 ) {
+ printf( " %2.0f%% complete\n", $count * 100 / $total );
+ }
+ }
+
+ # Occasionally, IRIS checkins refer to stations that are not part of
+ # the Travel::Status::DE::IRIS database. Add those as HAFAS stops to
+ # satisfy the upcoming foreign key constraints.
+
+ my %iris_has_eva;
+ $res = $db->query(qq{select eva from stations where source = 0;});
+ while ( my $row = $res->hash ) {
+ $iris_has_eva{ $row->{eva} } = 1;
+ }
+
+ my %hafas_by_eva;
+ $res = $db->query(qq{select * from stations where source = 1;});
+ while ( my $row = $res->hash ) {
+ $hafas_by_eva{ $row->{eva} } = $row;
+ }
+
+ my @iris_ref_stations;
+ $res
+ = $db->query(
+qq{select distinct checkin_station_id from journeys where backend_id = 0;}
+ );
+ while ( my $row = $res->hash ) {
+ push( @iris_ref_stations, $row->{checkin_station_id} );
+ }
+ $res
+ = $db->query(
+qq{select distinct checkout_station_id from journeys where backend_id = 0;}
+ );
+ while ( my $row = $res->hash ) {
+ push( @iris_ref_stations, $row->{checkout_station_id} );
+ }
+ $res
+ = $db->query(
+qq{select distinct checkin_station_id from in_transit where backend_id = 0;}
+ );
+ while ( my $row = $res->hash ) {
+ push( @iris_ref_stations, $row->{checkin_station_id} );
+ }
+ $res
+ = $db->query(
+qq{select distinct checkout_station_id from in_transit where backend_id = 0;}
+ );
+ while ( my $row = $res->hash ) {
+ if ( $row->{checkout_station_id} ) {
+ push( @iris_ref_stations, $row->{checkout_station_id} );
+ }
+ }
+
+ @iris_ref_stations = List::Util::uniq @iris_ref_stations;
+
+ for my $station (@iris_ref_stations) {
+ if ( not $iris_has_eva{$station} ) {
+ $hafas_by_eva{$station}{source} = 0;
+ $hafas_by_eva{$station}{archived} = 1;
+ $db->insert( 'stations', $hafas_by_eva{$station} );
+ }
+ }
+
+ $db->query(
+ qq{
+ alter table in_transit add constraint in_transit_checkin_eva_fk
+ foreign key (checkin_station_id, backend_id)
+ references stations (eva, source);
+ alter table in_transit add constraint in_transit_checkout_eva_fk
+ foreign key (checkout_station_id, backend_id)
+ references stations (eva, source);
+ alter table journeys add constraint journeys_checkin_eva_fk
+ foreign key (checkin_station_id, backend_id)
+ references stations (eva, source);
+ alter table journeys add constraint journeys_checkout_eva_fk
+ foreign key (checkout_station_id, backend_id)
+ references stations (eva, source);
+ drop view in_transit_str;
+ drop view journeys_str;
+ drop view follows_in_transit;
+ create view in_transit_str as select
+ user_id,
+ backend.iris as is_iris, backend.hafas as is_hafas,
+ backend.efa as is_efa, backend.ris as is_ris,
+ backend.name as backend_name, in_transit.backend_id as backend_id,
+ train_type, train_line, train_no, train_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ dep_station.ds100 as dep_ds100,
+ dep_station.name as dep_name,
+ dep_station.lat as dep_lat,
+ dep_station.lon as dep_lon,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ arr_station.ds100 as arr_ds100,
+ arr_station.name as arr_name,
+ arr_station.lat as arr_lat,
+ arr_station.lon as arr_lon,
+ polyline_id,
+ polylines.polyline as polyline,
+ visibility,
+ coalesce(visibility, users.public_level & 127) as effective_visibility,
+ cancelled, route, messages, user_data,
+ dep_platform, arr_platform, data
+ from in_transit
+ left join polylines on polylines.id = polyline_id
+ left join users on users.id = user_id
+ left join stations as dep_station on checkin_station_id = dep_station.eva and in_transit.backend_id = dep_station.source
+ left join stations as arr_station on checkout_station_id = arr_station.eva and in_transit.backend_id = arr_station.source
+ left join backends as backend on in_transit.backend_id = backend.id
+ ;
+ create view journeys_str as select
+ journeys.id as journey_id, user_id,
+ backend.iris as is_iris, backend.hafas as is_hafas,
+ backend.efa as is_efa, backend.ris as is_ris,
+ backend.name as backend_name, journeys.backend_id as backend_id,
+ train_type, train_line, train_no, train_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ dep_station.ds100 as dep_ds100,
+ dep_station.name as dep_name,
+ dep_station.lat as dep_lat,
+ dep_station.lon as dep_lon,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ arr_station.ds100 as arr_ds100,
+ arr_station.name as arr_name,
+ arr_station.lat as arr_lat,
+ arr_station.lon as arr_lon,
+ polylines.polyline as polyline,
+ visibility,
+ coalesce(visibility, users.public_level & 127) as effective_visibility,
+ cancelled, edited, route, messages, user_data,
+ dep_platform, arr_platform
+ from journeys
+ left join polylines on polylines.id = polyline_id
+ left join users on users.id = user_id
+ left join stations as dep_station on checkin_station_id = dep_station.eva and journeys.backend_id = dep_station.source
+ left join stations as arr_station on checkout_station_id = arr_station.eva and journeys.backend_id = arr_station.source
+ left join backends as backend on journeys.backend_id = backend.id
+ ;
+ create view follows_in_transit as select
+ r1.subject_id as follower_id, user_id as followee_id,
+ users.name as followee_name,
+ train_type, train_line, train_no, train_id,
+ in_transit.backend_id as backend_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ dep_station.ds100 as dep_ds100,
+ dep_station.name as dep_name,
+ dep_station.lat as dep_lat,
+ dep_station.lon as dep_lon,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ arr_station.ds100 as arr_ds100,
+ arr_station.name as arr_name,
+ arr_station.lat as arr_lat,
+ arr_station.lon as arr_lon,
+ polyline_id,
+ polylines.polyline as polyline,
+ visibility,
+ coalesce(visibility, users.public_level & 127) as effective_visibility,
+ cancelled, route, messages, user_data,
+ dep_platform, arr_platform, data
+ from in_transit
+ left join polylines on polylines.id = polyline_id
+ left join users on users.id = user_id
+ left join relations as r1 on r1.predicate = 1 and r1.object_id = user_id
+ left join stations as dep_station on checkin_station_id = dep_station.eva and in_transit.backend_id = dep_station.source
+ left join stations as arr_station on checkout_station_id = arr_station.eva and in_transit.backend_id = arr_station.source
+ order by checkin_time desc
+ ;
+ create view users_with_backend as select
+ users.id as id, users.name as name, status, public_level,
+ email, password, registered_at, last_seen,
+ deletion_requested, deletion_notified, use_history,
+ accept_follows, notifications, profile, backend_id, iris,
+ hafas, efa, ris, backend.name as backend_name
+ from users
+ left join backends as backend on users.backend_id = backend.id
+ ;
+ update schema_version set version = 55;
+ update schema_version set hafas = '0';
+ }
+ );
+ say
+ 'This travelynx instance now has support for non-DB HAFAS backends.';
+ say
+'If the migration fails due to a deadlock, re-run it after stopping all background workers';
+ },
+
+ # v55 -> v56
+ # include backend data in dumpstops command
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ create view stations_str as
+ select stations.name as name,
+ eva, lat, lon,
+ backends.name as backend,
+ iris as is_iris,
+ hafas as is_hafas,
+ efa as is_efa,
+ ris as is_ris
+ from stations
+ left join backends
+ on source = backends.id;
+ update schema_version set version = 56;
+ }
+ );
+ },
+
+ # v56 -> v57
+ # Berlin Hbf used to be divided between "Berlin Hbf" (8011160) and "Berlin
+ # Hbf (tief)" (8098160). Since 2024, both are called "Berlin Hbf".
+ # As there are some places in the IRIS backend where station names are
+ # mapped to EVA IDs, this name collision is problematic. As of 2.8.21,
+ # travelynx handles this IRIS edge case (and probably similar edge cases
+ # in Karlsruhe). Rebuild stats to ensure they contain no bogus data.
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ truncate journey_stats;
+ update schema_version set version = 57;
+ }
+ );
+ },
+
+ # v57 -> v58
+ # Add backend data to follows_in_transit
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ drop view follows_in_transit;
+ create view follows_in_transit as select
+ r1.subject_id as follower_id, user_id as followee_id,
+ users.name as followee_name,
+ train_type, train_line, train_no, train_id,
+ backend.iris as is_iris, backend.hafas as is_hafas,
+ backend.efa as is_efa, backend.ris as is_ris,
+ backend.name as backend_name, in_transit.backend_id as backend_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ dep_station.ds100 as dep_ds100,
+ dep_station.name as dep_name,
+ dep_station.lat as dep_lat,
+ dep_station.lon as dep_lon,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ arr_station.ds100 as arr_ds100,
+ arr_station.name as arr_name,
+ arr_station.lat as arr_lat,
+ arr_station.lon as arr_lon,
+ polyline_id,
+ polylines.polyline as polyline,
+ visibility,
+ coalesce(visibility, users.public_level & 127) as effective_visibility,
+ cancelled, route, messages, user_data,
+ dep_platform, arr_platform, data
+ from in_transit
+ left join polylines on polylines.id = polyline_id
+ left join users on users.id = user_id
+ left join relations as r1 on r1.predicate = 1 and r1.object_id = user_id
+ left join stations as dep_station on checkin_station_id = dep_station.eva and in_transit.backend_id = dep_station.source
+ left join stations as arr_station on checkout_station_id = arr_station.eva and in_transit.backend_id = arr_station.source
+ left join backends as backend on in_transit.backend_id = backend.id
+ order by checkin_time desc
+ ;
+ update schema_version set version = 58;
+ }
+ );
+ },
+
+ # v58 -> v59
+ # DB HAFAS is dead. Default to DB IRIS for now.
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ alter table users alter column backend_id set default 0;
+ update schema_version set version = 59;
+ }
+ );
+ },
+
+ # v59 -> v60
+ # Add bahn.de / DBRIS backend
+ sub {
+ my ($db) = @_;
+ $db->insert(
+ 'backends',
+ {
+ iris => 0,
+ hafas => 0,
+ efa => 0,
+ ris => 1,
+ name => 'bahn.de',
+ },
+ );
+ $db->query(
+ qq{
+ update schema_version set version = 60;
+ }
+ );
+ },
+
+ # v60 -> v61
+ # Rename "ris" / "is_ris" to "dbris" / "is_dbris", as it is DB-specific
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ drop view in_transit_str;
+ drop view journeys_str;
+ drop view users_with_backend;
+ drop view follows_in_transit;
+ alter table backends rename column ris to dbris;
+ create view in_transit_str as select
+ user_id,
+ backend.iris as is_iris, backend.hafas as is_hafas,
+ backend.efa as is_efa, backend.dbris as is_dbris,
+ backend.name as backend_name, in_transit.backend_id as backend_id,
+ train_type, train_line, train_no, train_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ dep_station.ds100 as dep_ds100,
+ dep_station.name as dep_name,
+ dep_station.lat as dep_lat,
+ dep_station.lon as dep_lon,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ arr_station.ds100 as arr_ds100,
+ arr_station.name as arr_name,
+ arr_station.lat as arr_lat,
+ arr_station.lon as arr_lon,
+ polyline_id,
+ polylines.polyline as polyline,
+ visibility,
+ coalesce(visibility, users.public_level & 127) as effective_visibility,
+ cancelled, route, messages, user_data,
+ dep_platform, arr_platform, data
+ from in_transit
+ left join polylines on polylines.id = polyline_id
+ left join users on users.id = user_id
+ left join stations as dep_station on checkin_station_id = dep_station.eva and in_transit.backend_id = dep_station.source
+ left join stations as arr_station on checkout_station_id = arr_station.eva and in_transit.backend_id = arr_station.source
+ left join backends as backend on in_transit.backend_id = backend.id
+ ;
+ create view journeys_str as select
+ journeys.id as journey_id, user_id,
+ backend.iris as is_iris, backend.hafas as is_hafas,
+ backend.efa as is_efa, backend.dbris as is_dbris,
+ backend.name as backend_name, journeys.backend_id as backend_id,
+ train_type, train_line, train_no, train_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ dep_station.ds100 as dep_ds100,
+ dep_station.name as dep_name,
+ dep_station.lat as dep_lat,
+ dep_station.lon as dep_lon,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ arr_station.ds100 as arr_ds100,
+ arr_station.name as arr_name,
+ arr_station.lat as arr_lat,
+ arr_station.lon as arr_lon,
+ polylines.polyline as polyline,
+ visibility,
+ coalesce(visibility, users.public_level & 127) as effective_visibility,
+ cancelled, edited, route, messages, user_data,
+ dep_platform, arr_platform
+ from journeys
+ left join polylines on polylines.id = polyline_id
+ left join users on users.id = user_id
+ left join stations as dep_station on checkin_station_id = dep_station.eva and journeys.backend_id = dep_station.source
+ left join stations as arr_station on checkout_station_id = arr_station.eva and journeys.backend_id = arr_station.source
+ left join backends as backend on journeys.backend_id = backend.id
+ ;
+ create view users_with_backend as select
+ users.id as id, users.name as name, status, public_level,
+ email, password, registered_at, last_seen,
+ deletion_requested, deletion_notified, use_history,
+ accept_follows, notifications, profile, backend_id, iris,
+ hafas, efa, dbris, backend.name as backend_name
+ from users
+ left join backends as backend on users.backend_id = backend.id
+ ;
+ create view follows_in_transit as select
+ r1.subject_id as follower_id, user_id as followee_id,
+ users.name as followee_name,
+ train_type, train_line, train_no, train_id,
+ backend.iris as is_iris, backend.hafas as is_hafas,
+ backend.efa as is_efa, backend.dbris as is_dbris,
+ backend.name as backend_name, in_transit.backend_id as backend_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ dep_station.ds100 as dep_ds100,
+ dep_station.name as dep_name,
+ dep_station.lat as dep_lat,
+ dep_station.lon as dep_lon,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ arr_station.ds100 as arr_ds100,
+ arr_station.name as arr_name,
+ arr_station.lat as arr_lat,
+ arr_station.lon as arr_lon,
+ polyline_id,
+ polylines.polyline as polyline,
+ visibility,
+ coalesce(visibility, users.public_level & 127) as effective_visibility,
+ cancelled, route, messages, user_data,
+ dep_platform, arr_platform, data
+ from in_transit
+ left join polylines on polylines.id = polyline_id
+ left join users on users.id = user_id
+ left join relations as r1 on r1.predicate = 1 and r1.object_id = user_id
+ left join stations as dep_station on checkin_station_id = dep_station.eva and in_transit.backend_id = dep_station.source
+ left join stations as arr_station on checkout_station_id = arr_station.eva and in_transit.backend_id = arr_station.source
+ left join backends as backend on in_transit.backend_id = backend.id
+ order by checkin_time desc
+ ;
+ update schema_version set version = 61;
+ }
+ );
+ },
+
+ # v61 -> v62
+ # Add MOTIS backend type, add RNV and transitous MOTIS backends
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ alter table backends add column motis bool default false;
+ alter table schema_version add column motis varchar(12);
+
+ create table stations_external_ids (
+ eva serial not null primary key,
+ backend_id smallint not null,
+ external_id text not null,
+
+ unique (backend_id, external_id),
+ foreign key (eva, backend_id) references stations (eva, source)
+ );
+
+ create view stations_with_external_ids as select
+ stations.*, stations_external_ids.external_id
+ from stations
+ left join stations_external_ids on
+ stations.eva = stations_external_ids.eva and
+ stations.source = stations_external_ids.backend_id
+ ;
+
+ drop view in_transit_str;
+ drop view journeys_str;
+ drop view users_with_backend;
+ drop view follows_in_transit;
+
+ create view in_transit_str as select
+ user_id,
+ backend.iris as is_iris, backend.hafas as is_hafas,
+ backend.efa as is_efa, backend.dbris as is_dbris,
+ backend.motis as is_motis,
+ backend.name as backend_name, in_transit.backend_id as backend_id,
+ train_type, train_line, train_no, train_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ dep_station.ds100 as dep_ds100,
+ dep_station.name as dep_name,
+ dep_station.lat as dep_lat,
+ dep_station.lon as dep_lon,
+ dep_station_external_id.external_id as dep_external_id,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ arr_station.ds100 as arr_ds100,
+ arr_station.name as arr_name,
+ arr_station.lat as arr_lat,
+ arr_station.lon as arr_lon,
+ arr_station_external_id.external_id as arr_external_id,
+ polyline_id,
+ polylines.polyline as polyline,
+ visibility,
+ coalesce(visibility, users.public_level & 127) as effective_visibility,
+ cancelled, route, messages, user_data,
+ dep_platform, arr_platform, data
+ from in_transit
+ left join polylines on polylines.id = polyline_id
+ left join users on users.id = user_id
+ left join stations as dep_station on checkin_station_id = dep_station.eva and in_transit.backend_id = dep_station.source
+ left join stations as arr_station on checkout_station_id = arr_station.eva and in_transit.backend_id = arr_station.source
+ left join stations_external_ids as dep_station_external_id on checkin_station_id = dep_station_external_id.eva and in_transit.backend_id = dep_station_external_id.backend_id
+ left join stations_external_ids as arr_station_external_id on checkout_station_id = arr_station_external_id.eva and in_transit.backend_id = arr_station_external_id.backend_id
+ left join backends as backend on in_transit.backend_id = backend.id
+ ;
+ create view journeys_str as select
+ journeys.id as journey_id, user_id,
+ backend.iris as is_iris, backend.hafas as is_hafas,
+ backend.efa as is_efa, backend.dbris as is_dbris,
+ backend.motis as is_motis,
+ backend.name as backend_name, journeys.backend_id as backend_id,
+ train_type, train_line, train_no, train_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ dep_station.ds100 as dep_ds100,
+ dep_station.name as dep_name,
+ dep_station.lat as dep_lat,
+ dep_station.lon as dep_lon,
+ dep_station_external_id.external_id as dep_external_id,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ arr_station.ds100 as arr_ds100,
+ arr_station.name as arr_name,
+ arr_station.lat as arr_lat,
+ arr_station.lon as arr_lon,
+ arr_station_external_id.external_id as arr_external_id,
+ polylines.polyline as polyline,
+ visibility,
+ coalesce(visibility, users.public_level & 127) as effective_visibility,
+ cancelled, edited, route, messages, user_data,
+ dep_platform, arr_platform
+ from journeys
+ left join polylines on polylines.id = polyline_id
+ left join users on users.id = user_id
+ left join stations as dep_station on checkin_station_id = dep_station.eva and journeys.backend_id = dep_station.source
+ left join stations as arr_station on checkout_station_id = arr_station.eva and journeys.backend_id = arr_station.source
+ left join stations_external_ids as dep_station_external_id on checkin_station_id = dep_station_external_id.eva and journeys.backend_id = dep_station_external_id.backend_id
+ left join stations_external_ids as arr_station_external_id on checkout_station_id = arr_station_external_id.eva and journeys.backend_id = arr_station_external_id.backend_id
+ left join backends as backend on journeys.backend_id = backend.id
+ ;
+ create view users_with_backend as select
+ users.id as id, users.name as name, status, public_level,
+ email, password, registered_at, last_seen,
+ deletion_requested, deletion_notified, use_history,
+ accept_follows, notifications, profile, backend_id, iris,
+ hafas, efa, dbris, motis, backend.name as backend_name
+ from users
+ left join backends as backend on users.backend_id = backend.id
+ ;
+ create view follows_in_transit as select
+ r1.subject_id as follower_id, user_id as followee_id,
+ users.name as followee_name,
+ train_type, train_line, train_no, train_id,
+ backend.iris as is_iris, backend.hafas as is_hafas,
+ backend.efa as is_efa, backend.dbris as is_dbris,
+ backend.motis as is_motis,
+ backend.name as backend_name, in_transit.backend_id as backend_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ dep_station.ds100 as dep_ds100,
+ dep_station.name as dep_name,
+ dep_station.lat as dep_lat,
+ dep_station.lon as dep_lon,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ arr_station.ds100 as arr_ds100,
+ arr_station.name as arr_name,
+ arr_station.lat as arr_lat,
+ arr_station.lon as arr_lon,
+ polyline_id,
+ polylines.polyline as polyline,
+ visibility,
+ coalesce(visibility, users.public_level & 127) as effective_visibility,
+ cancelled, route, messages, user_data,
+ dep_platform, arr_platform, data
+ from in_transit
+ left join polylines on polylines.id = polyline_id
+ left join users on users.id = user_id
+ left join relations as r1 on r1.predicate = 1 and r1.object_id = user_id
+ left join stations as dep_station on checkin_station_id = dep_station.eva and in_transit.backend_id = dep_station.source
+ left join stations as arr_station on checkout_station_id = arr_station.eva and in_transit.backend_id = arr_station.source
+ left join backends as backend on in_transit.backend_id = backend.id
+ order by checkin_time desc
+ ;
+ }
+ );
+ $db->query(
+ qq{
+ update schema_version set version = 62;
+ }
+ );
+ },
+
+ # v62 -> v63
+ # Add EFA backend support
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ alter table schema_version add column efa varchar(12);
+ update schema_version set version = 63;
+ update schema_version set efa = '0';
+ }
+ );
+ },
+
+ # v63 -> v64
+ # Relax train_type length constraints for EFA and MOTIS checkins
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ drop view in_transit_str;
+ drop view journeys_str;
+ drop view users_with_backend;
+ drop view follows_in_transit;
+
+ alter table in_transit alter column train_type type varchar(32);
+ alter table journeys alter column train_type type varchar(32);
+
+ create view in_transit_str as select
+ user_id,
+ backend.iris as is_iris, backend.hafas as is_hafas,
+ backend.efa as is_efa, backend.dbris as is_dbris,
+ backend.motis as is_motis,
+ backend.name as backend_name, in_transit.backend_id as backend_id,
+ train_type, train_line, train_no, train_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ dep_station.ds100 as dep_ds100,
+ dep_station.name as dep_name,
+ dep_station.lat as dep_lat,
+ dep_station.lon as dep_lon,
+ dep_station_external_id.external_id as dep_external_id,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ arr_station.ds100 as arr_ds100,
+ arr_station.name as arr_name,
+ arr_station.lat as arr_lat,
+ arr_station.lon as arr_lon,
+ arr_station_external_id.external_id as arr_external_id,
+ polyline_id,
+ polylines.polyline as polyline,
+ visibility,
+ coalesce(visibility, users.public_level & 127) as effective_visibility,
+ cancelled, route, messages, user_data,
+ dep_platform, arr_platform, data
+ from in_transit
+ left join polylines on polylines.id = polyline_id
+ left join users on users.id = user_id
+ left join stations as dep_station on checkin_station_id = dep_station.eva and in_transit.backend_id = dep_station.source
+ left join stations as arr_station on checkout_station_id = arr_station.eva and in_transit.backend_id = arr_station.source
+ left join stations_external_ids as dep_station_external_id on checkin_station_id = dep_station_external_id.eva and in_transit.backend_id = dep_station_external_id.backend_id
+ left join stations_external_ids as arr_station_external_id on checkout_station_id = arr_station_external_id.eva and in_transit.backend_id = arr_station_external_id.backend_id
+ left join backends as backend on in_transit.backend_id = backend.id
+ ;
+ create view journeys_str as select
+ journeys.id as journey_id, user_id,
+ backend.iris as is_iris, backend.hafas as is_hafas,
+ backend.efa as is_efa, backend.dbris as is_dbris,
+ backend.motis as is_motis,
+ backend.name as backend_name, journeys.backend_id as backend_id,
+ train_type, train_line, train_no, train_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ dep_station.ds100 as dep_ds100,
+ dep_station.name as dep_name,
+ dep_station.lat as dep_lat,
+ dep_station.lon as dep_lon,
+ dep_station_external_id.external_id as dep_external_id,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ arr_station.ds100 as arr_ds100,
+ arr_station.name as arr_name,
+ arr_station.lat as arr_lat,
+ arr_station.lon as arr_lon,
+ arr_station_external_id.external_id as arr_external_id,
+ polylines.polyline as polyline,
+ visibility,
+ coalesce(visibility, users.public_level & 127) as effective_visibility,
+ cancelled, edited, route, messages, user_data,
+ dep_platform, arr_platform
+ from journeys
+ left join polylines on polylines.id = polyline_id
+ left join users on users.id = user_id
+ left join stations as dep_station on checkin_station_id = dep_station.eva and journeys.backend_id = dep_station.source
+ left join stations as arr_station on checkout_station_id = arr_station.eva and journeys.backend_id = arr_station.source
+ left join stations_external_ids as dep_station_external_id on checkin_station_id = dep_station_external_id.eva and journeys.backend_id = dep_station_external_id.backend_id
+ left join stations_external_ids as arr_station_external_id on checkout_station_id = arr_station_external_id.eva and journeys.backend_id = arr_station_external_id.backend_id
+ left join backends as backend on journeys.backend_id = backend.id
+ ;
+ create view users_with_backend as select
+ users.id as id, users.name as name, status, public_level,
+ email, password, registered_at, last_seen,
+ deletion_requested, deletion_notified, use_history,
+ accept_follows, notifications, profile, backend_id, iris,
+ hafas, efa, dbris, motis, backend.name as backend_name
+ from users
+ left join backends as backend on users.backend_id = backend.id
+ ;
+ create view follows_in_transit as select
+ r1.subject_id as follower_id, user_id as followee_id,
+ users.name as followee_name,
+ train_type, train_line, train_no, train_id,
+ backend.iris as is_iris, backend.hafas as is_hafas,
+ backend.efa as is_efa, backend.dbris as is_dbris,
+ backend.motis as is_motis,
+ backend.name as backend_name, in_transit.backend_id as backend_id,
+ extract(epoch from checkin_time) as checkin_ts,
+ extract(epoch from sched_departure) as sched_dep_ts,
+ extract(epoch from real_departure) as real_dep_ts,
+ checkin_station_id as dep_eva,
+ dep_station.ds100 as dep_ds100,
+ dep_station.name as dep_name,
+ dep_station.lat as dep_lat,
+ dep_station.lon as dep_lon,
+ extract(epoch from checkout_time) as checkout_ts,
+ extract(epoch from sched_arrival) as sched_arr_ts,
+ extract(epoch from real_arrival) as real_arr_ts,
+ checkout_station_id as arr_eva,
+ arr_station.ds100 as arr_ds100,
+ arr_station.name as arr_name,
+ arr_station.lat as arr_lat,
+ arr_station.lon as arr_lon,
+ polyline_id,
+ polylines.polyline as polyline,
+ visibility,
+ coalesce(visibility, users.public_level & 127) as effective_visibility,
+ cancelled, route, messages, user_data,
+ dep_platform, arr_platform, data
+ from in_transit
+ left join polylines on polylines.id = polyline_id
+ left join users on users.id = user_id
+ left join relations as r1 on r1.predicate = 1 and r1.object_id = user_id
+ left join stations as dep_station on checkin_station_id = dep_station.eva and in_transit.backend_id = dep_station.source
+ left join stations as arr_station on checkout_station_id = arr_station.eva and in_transit.backend_id = arr_station.source
+ left join backends as backend on in_transit.backend_id = backend.id
+ order by checkin_time desc
+ ;
+
+ update schema_version set version = 64;
+ }
+ );
+ },
+
+ # v64 -> v65
+ # stations_str: add is_motis
+ sub {
+ my ($db) = @_;
+ $db->query(
+ qq{
+ drop view stations_str;
+ create view stations_str as
+ select stations.name as name,
+ eva, lat, lon,
+ backends.name as backend,
+ dbris as is_dbris,
+ efa as is_efa,
+ iris as is_iris,
+ hafas as is_hafas,
+ motis as is_motis
+ from stations
+ left join backends
+ on source = backends.id;
+ update schema_version set version = 65;
+ }
+ );
+ },
);
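+# Synchronize the stations table with Travel::Status::DE::IRIS::Stations:
+# upsert the current IRIS station list (source 0), restore archived stops
+# from share/old_stations.json, and report journeys that reference EVA IDs
+# unknown to either source.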
+sub sync_stations {
+ my ( $db, $iris_version ) = @_;
+
+ $db->update( 'schema_version',
+ { iris => $Travel::Status::DE::IRIS::Stations::VERSION } );
+
+ say 'Updating stations table, this may take a while ...';
+ my $total = scalar Travel::Status::DE::IRIS::Stations::get_stations();
+ my $count = 0;
+ for my $s ( Travel::Status::DE::IRIS::Stations::get_stations() ) {
+ my ( $ds100, $name, $eva, $lon, $lat ) = @{$s};
+ if ( $ENV{__TRAVELYNX_TEST_MINI_IRIS}
+ and ( $eva < 8000000 or $eva > 8000100 ) )
+ {
+ next;
+ }
+ $db->insert(
+ 'stations',
+ {
+ eva => $eva,
+ ds100 => $ds100,
+ name => $name,
+ lat => $lat,
+ lon => $lon,
+ source => 0,
+ archived => 0
+ },
+ {
+ on_conflict => \
+'(eva, source) do update set archived = false, source = 0, ds100 = EXCLUDED.ds100, name=EXCLUDED.name, lat=EXCLUDED.lat, lon=EXCLUDED.lon'
+ }
+ );
+ if ( $count++ % 1000 == 0 ) {
+ printf( " %2.0f%% complete\n", $count * 100 / $total );
+ }
+ }
+ say ' done';
+
+ my $res1 = $db->query(
+ qq{
+ select checkin_station_id
+ from journeys
+ left join stations on journeys.checkin_station_id = stations.eva
+ where stations.eva is null
+ limit 1;
+ }
+ )->hash;
+
+ my $res2 = $db->query(
+ qq{
+ select checkout_station_id
+ from journeys
+ left join stations on journeys.checkout_station_id = stations.eva
+ where stations.eva is null
+ limit 1;
+ }
+ )->hash;
+
+ if ( $res1 or $res2 ) {
+ say 'Dropping stats cache for archived stations ...';
+ $db->query('truncate journey_stats;');
+ }
+
+ say 'Updating archived stations ...';
+ my $old_stations
+ = JSON->new->utf8->decode( scalar read_file('share/old_stations.json') );
+ if ( $ENV{__TRAVELYNX_TEST_MINI_IRIS} ) {
+ $old_stations = [];
+ }
+ for my $s ( @{$old_stations} ) {
+ $db->insert(
+ 'stations',
+ {
+ eva => $s->{eva},
+ ds100 => $s->{ds100},
+ name => $s->{name},
+ lat => $s->{latlong}[0],
+ lon => $s->{latlong}[1],
+ source => 0,
+ archived => 1
+ },
+ { on_conflict => undef }
+ );
+ }
+
+ if ( $iris_version == 0 ) {
+ say 'Applying EVA ID changes ...';
+ for my $change (
+ [ 721394, 301002, 'RKBP: Kronenplatz (U), Karlsruhe' ],
+ [
+ 721356, 901012,
+ 'RKME: Ettlinger Tor/Staatstheater (U), Karlsruhe'
+ ],
+ )
+ {
+ my ( $old, $new, $desc ) = @{$change};
+ my $rows = $db->update(
+ 'journeys',
+ { checkout_station_id => $new },
+ { checkout_station_id => $old }
+ )->rows;
+ $rows += $db->update(
+ 'journeys',
+ { checkin_station_id => $new },
+ { checkin_station_id => $old }
+ )->rows;
+ if ($rows) {
+ say "$desc ($old -> $new) : $rows rows";
+ }
+ }
+ }
+
+ say 'Checking for unknown EVA IDs ...';
+ my $found = 0;
+
+ $res1 = $db->query(
+ qq{
+ select checkin_station_id
+ from journeys
+ left join stations on journeys.checkin_station_id = stations.eva
+ where stations.eva is null;
+ }
+ );
+
+ $res2 = $db->query(
+ qq{
+ select checkout_station_id
+ from journeys
+ left join stations on journeys.checkout_station_id = stations.eva
+ where stations.eva is null;
+ }
+ );
+
+ my %notified;
+ while ( my $row = $res1->hash ) {
+ my $eva = $row->{checkin_station_id};
+ if ( not $found ) {
+ $found = 1;
+ say '';
+ say '------------8<----------';
+ say 'Travel::Status::DE::IRIS v'
+ . $Travel::Status::DE::IRIS::Stations::VERSION;
+ }
+ if ( not $notified{$eva} ) {
+ say $eva;
+ $notified{$eva} = 1;
+ }
+ }
+
+ while ( my $row = $res2->hash ) {
+ my $eva = $row->{checkout_station_id};
+ if ( not $found ) {
+ $found = 1;
+ say '';
+ say '------------8<----------';
+ say 'Travel::Status::DE::IRIS v'
+ . $Travel::Status::DE::IRIS::Stations::VERSION;
+ }
+ if ( not $notified{$eva} ) {
+ say $eva;
+ $notified{$eva} = 1;
+ }
+ }
+
+ if ($found) {
+ say '------------8<----------';
+ say '';
+ say
+'Due to a conceptual flaw in past travelynx releases, your database contains unknown EVA IDs.';
+ say
+'Please file a bug report titled "Missing EVA IDs after DB migration" at https://github.com/derf/travelynx/issues';
+ say 'and include the list shown above in the bug report.';
+ say
+'If you do not have a GitHub account, please send an e-mail to derf+travelynx@finalrewind.org instead.';
+ say '';
+ say 'This issue does not affect usability or long-term data integrity,';
+ say 'and handling it is not time-critical.';
+ say
+'Past journeys referencing unknown EVA IDs may have inaccurate distance statistics,';
+ say
+'but this will be resolved once a future release handles those EVA IDs.';
+ say 'Note that this issue was already present in previous releases.';
+ }
+ else {
+ say 'None found.';
+ }
+}
+
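+# The sync_backends_* helpers are idempotent: each inserts a backend row for
+# every service reported by the corresponding Travel::Status module (unless a
+# backend with that name already exists) and records the module version in
+# schema_version.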
+sub sync_backends_efa {
+ my ($db) = @_;
+ for my $service ( Travel::Status::DE::EFA::get_services() ) {
+ my $present = $db->select(
+ 'backends',
+ 'count(*) as count',
+ {
+ efa => 1,
+ name => $service->{shortname}
+ }
+ )->hash->{count};
+ if ( not $present ) {
+ $db->insert(
+ 'backends',
+ {
+ dbris => 0,
+ efa => 1,
+ hafas => 0,
+ iris => 0,
+ motis => 0,
+ name => $service->{shortname},
+ },
+ { on_conflict => undef }
+ );
+ }
+ }
+
+ $db->update( 'schema_version',
+ { efa => $Travel::Status::DE::EFA::VERSION } );
+}
+
+sub sync_backends_hafas {
+ my ($db) = @_;
+ for my $service ( Travel::Status::DE::HAFAS::get_services() ) {
+ my $present = $db->select(
+ 'backends',
+ 'count(*) as count',
+ {
+ hafas => 1,
+ name => $service->{shortname}
+ }
+ )->hash->{count};
+ if ( not $present ) {
+ $db->insert(
+ 'backends',
+ {
+ dbris => 0,
+ efa => 0,
+ hafas => 1,
+ iris => 0,
+ motis => 0,
+ name => $service->{shortname},
+ },
+ { on_conflict => undef }
+ );
+ }
+ }
+
+ $db->update( 'schema_version',
+ { hafas => $Travel::Status::DE::HAFAS::VERSION } );
+}
+
+sub sync_backends_motis {
+ my ($db) = @_;
+ for my $service ( Travel::Status::MOTIS::get_services() ) {
+ my $present = $db->select(
+ 'backends',
+ 'count(*) as count',
+ {
+ motis => 1,
+ name => $service->{shortname}
+ }
+ )->hash->{count};
+ if ( not $present ) {
+ $db->insert(
+ 'backends',
+ {
+ dbris => 0,
+ efa => 0,
+ hafas => 0,
+ iris => 0,
+ motis => 1,
+ name => $service->{shortname},
+ },
+ { on_conflict => undef }
+ );
+ }
+ }
+
+ $db->update( 'schema_version',
+ { motis => $Travel::Status::MOTIS::VERSION } );
+}
+
sub setup_db {
my ($db) = @_;
my $tx = $db->begin;
@@ -956,31 +3504,119 @@ sub setup_db {
}
}
+sub failure_hints {
+ my ($old_version) = @_;
+ say STDERR 'This travelynx instance has reached an undefined state:';
+ say STDERR
+'The source code expects a different schema version than the one present in the database.';
+ say STDERR
+'Please file a detailed bug report at <https://github.com/derf/travelynx/issues>';
+ say STDERR 'or send an e-mail to derf+travelynx@finalrewind.org.';
+ if ($old_version) {
+ say STDERR '';
+ say STDERR
+ "The last migration was performed with travelynx v${old_version}.";
+ say STDERR
+'You may be able to return to a working state with the following command:';
+ say STDERR "git checkout ${old_version}";
+ say STDERR '';
+ say STDERR 'We apologize for any inconvenience.';
+ }
+}
+
sub migrate_db {
- my ($db) = @_;
+ my ( $self, $db ) = @_;
my $tx = $db->begin;
my $schema_version = get_schema_version($db);
say "Found travelynx schema v${schema_version}";
+ my $old_version;
+
+ if ( $schema_version >= 29 ) {
+ $old_version = get_schema_version( $db, 'travelynx' );
+ }
+
if ( $schema_version == @migrations ) {
- say "Database layout is up-to-date";
+ say 'Database layout is up-to-date';
+ }
+ else {
+ eval {
+ for my $i ( $schema_version .. $#migrations ) {
+ printf( "Updating to v%d ...\n", $i + 1 );
+ $migrations[$i]($db);
+ }
+ say 'Update complete.';
+ };
+ if ($@) {
+ say STDERR "Migration failed: $@";
+ say STDERR "Rolling back to v${schema_version}";
+ failure_hints($old_version);
+ exit(1);
+ }
}
- eval {
- for my $i ( $schema_version .. $#migrations ) {
- printf( "Updating to v%d ...\n", $i + 1 );
- $migrations[$i]($db);
+ my $iris_version = get_schema_version( $db, 'iris' );
+ say "Found IRIS station table v${iris_version}";
+ if ( $iris_version eq $Travel::Status::DE::IRIS::Stations::VERSION ) {
+ say 'Station table is up-to-date';
+ }
+ else {
+ eval {
+ say
+"Synchronizing with Travel::Status::DE::IRIS $Travel::Status::DE::IRIS::Stations::VERSION";
+ sync_stations( $db, $iris_version );
+ say 'Synchronization complete.';
+ };
+ if ($@) {
+ say STDERR "Synchronization failed: $@";
+ if ( $schema_version != @migrations ) {
+ say STDERR "Rolling back to v${schema_version}";
+ failure_hints($old_version);
+ }
+ exit(1);
}
- };
- if ($@) {
- say STDERR "Migration failed: $@";
- say STDERR "Rolling back to v${schema_version}";
- exit(1);
}
+ my $efa_version = get_schema_version( $db, 'efa' );
+ say "Found backend table for EFA v${efa_version}";
+ if ( $efa_version eq $Travel::Status::DE::EFA::VERSION ) {
+ say 'Backend table is up-to-date';
+ }
+ else {
+ say
+"Synchronizing with Travel::Status::DE::EFA $Travel::Status::DE::EFA::VERSION";
+ sync_backends_efa($db);
+ }
+
+ my $hafas_version = get_schema_version( $db, 'hafas' );
+ say "Found backend table for HAFAS v${hafas_version}";
+ if ( $hafas_version eq $Travel::Status::DE::HAFAS::VERSION ) {
+ say 'Backend table is up-to-date';
+ }
+ else {
+ say
+"Synchronizing with Travel::Status::DE::HAFAS $Travel::Status::DE::HAFAS::VERSION";
+ sync_backends_hafas($db);
+ }
+
+ my $motis_version = get_schema_version( $db, 'motis' ) // '0';
+ say "Found backend table for MOTIS v${motis_version}";
+ if ( $motis_version eq $Travel::Status::MOTIS::VERSION ) {
+ say 'Backend table is up-to-date';
+ }
+ else {
+ say
+"Synchronizing with Travel::Status::MOTIS $Travel::Status::MOTIS::VERSION";
+ sync_backends_motis($db);
+ }
+
+ $db->update( 'schema_version',
+ { travelynx => $self->app->config->{version} } );
+
if ( get_schema_version($db) == @migrations ) {
$tx->commit;
+ say 'Changes committed to database. Have a nice day.';
}
else {
printf STDERR (
@@ -989,6 +3625,8 @@ sub migrate_db {
get_schema_version($db)
);
say STDERR "Rolling back to v${schema_version}";
+ say STDERR "";
+ failure_hints($old_version);
exit(1);
}
}
@@ -1007,10 +3645,13 @@ sub run {
if ( not defined get_schema_version($db) ) {
setup_db($db);
}
- migrate_db($db);
+ $self->migrate_db($db);
}
elsif ( $command eq 'has-current-schema' ) {
- if ( get_schema_version($db) == @migrations ) {
+ if ( get_schema_version($db) == @migrations
+ and get_schema_version( $db, 'iris' ) eq
+ $Travel::Status::DE::IRIS::Stations::VERSION )
+ {
say "yes";
}
else {
@@ -1035,5 +3676,5 @@ __END__
Recommended workflow:
> systemctl stop travelynx
- > perl index.pl migrate
+ > perl index.pl database migrate
> systemctl start travelynx
diff --git a/lib/Travelynx/Command/dumpconfig.pm b/lib/Travelynx/Command/dumpconfig.pm
index d2a6761..2c308c9 100644
--- a/lib/Travelynx/Command/dumpconfig.pm
+++ b/lib/Travelynx/Command/dumpconfig.pm
@@ -1,4 +1,8 @@
package Travelynx::Command::dumpconfig;
+
+# Copyright (C) 2020-2023 Birte Kristina Friesel
+#
+# SPDX-License-Identifier: AGPL-3.0-or-later
use Mojo::Base 'Mojolicious::Command';
use Data::Dumper;
diff --git a/lib/Travelynx/Command/dumpstops.pm b/lib/Travelynx/Command/dumpstops.pm
new file mode 100644
index 0000000..15f5861
--- /dev/null
+++ b/lib/Travelynx/Command/dumpstops.pm
@@ -0,0 +1,52 @@
+package Travelynx::Command::dumpstops;
+
+# Copyright (C) 2024-2025 Birte Kristina Friesel
+#
+# SPDX-License-Identifier: AGPL-3.0-or-later
+
+use Mojo::Base 'Mojolicious::Command';
+use List::Util qw();
+use Text::CSV;
+
+has description => 'Export known stops to CSV';
+
+has usage => sub { shift->extract_usage };
+
+sub run {
+ my ( $self, $command, $filename ) = @_;
+ my $db = $self->app->pg->db;
+
+ if ( not $command or not $filename ) {
+ $self->help;
+ }
+ elsif ( $command eq 'csv' ) {
+ open( my $fh, '>:encoding(utf-8)', $filename )
+ or die("open($filename): $!\n");
+
+ my $csv = Text::CSV->new( { eol => "\r\n" } );
+ $csv->combine(qw(name eva lat lon backend is_dbris is_efa is_iris is_hafas is_motis));
+ print $fh $csv->string;
+
+ my $iter = $self->app->stations->get_db_iterator;
+ while ( my $row = $iter->hash ) {
+ $csv->combine(
+ @{$row}{qw{name eva lat lon backend is_dbris is_efa is_iris is_hafas is_motis}} );
+ print $fh $csv->string;
+ }
+ close($fh);
+ }
+ else {
+ $self->help;
+ }
+}
+
+1;
+
+__END__
+
+=head1 SYNOPSIS
+
+ Usage: index.pl dumpstops <format> <filename>
+
+ Exports known stops to <filename>.
+ Right now, only the "csv" format is supported.
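+
+ For example (the output filename is arbitrary):
+
+ index.pl dumpstops csv stops.csv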
diff --git a/lib/Travelynx/Command/influxdb.pm b/lib/Travelynx/Command/influxdb.pm
new file mode 100644
index 0000000..4b779a2
--- /dev/null
+++ b/lib/Travelynx/Command/influxdb.pm
@@ -0,0 +1,204 @@
+package Travelynx::Command::influxdb;
+
+# Copyright (C) 2022 Birte Kristina Friesel
+#
+# SPDX-License-Identifier: AGPL-3.0-or-later
+use Mojo::Base 'Mojolicious::Command';
+
+use DateTime;
+
+has description => 'Generate statistics for InfluxDB';
+
+has usage => sub { shift->extract_usage };
+
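+# Render a single metric as an InfluxDB line protocol field ("label=value").
+# Metrics whose value is undefined are skipped.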
+sub query_to_influx {
+ my ( $label, $value ) = @_;
+
+ if ( defined $value ) {
+ return sprintf( '%s=%f', $label, $value );
+ }
+ return;
+}
+
+sub run {
+ my ($self) = @_;
+
+ my $db = $self->app->pg->db;
+
+ my $now = DateTime->now( time_zone => 'Europe/Berlin' );
+ my $active = $now->clone->subtract( months => 1 );
+
+ my @stats;
+ my @backend_stats;
+ my @traewelling;
+
+ push(
+ @stats,
+ query_to_influx(
+ 'pending_user_count',
+ $db->select( 'users', 'count(*) as count', { status => 0 } )
+ ->hash->{count}
+ )
+ );
+ push(
+ @stats,
+ query_to_influx(
+ 'reg_user_count',
+ $db->select( 'users', 'count(*) as count', { status => 1 } )
+ ->hash->{count}
+ )
+ );
+ push(
+ @stats,
+ query_to_influx(
+ 'active_user_count',
+ $db->select(
+ 'users',
+ 'count(*) as count',
+ {
+ status => 1,
+ last_seen => { '>', $active }
+ }
+ )->hash->{count}
+ )
+ );
+
+ push(
+ @stats,
+ query_to_influx(
+ 'checked_in_count',
+ $db->select( 'in_transit', 'count(*) as count' )->hash->{count}
+ )
+ );
+ push(
+ @stats,
+ query_to_influx(
+ 'checkin_count',
+ $db->select( 'journeys', 'count(*) as count' )->hash->{count}
+ )
+ );
+ push(
+ @stats,
+ query_to_influx(
+ 'polyline_count',
+ $db->select( 'polylines', 'count(*) as count' )->hash->{count}
+ )
+ );
+
+ my @backends = $self->app->stations->get_backends;
+
+ for my $backend (@backends) {
+ push(
+ @backend_stats,
+ [
+ $backend->{iris} ? 'IRIS' : $backend->{name},
+ $db->select(
+ 'stations',
+ 'count(*) as count',
+ {
+ source => $backend->{id},
+ archived => 0
+ }
+ )->hash->{count},
+ $db->select(
+ 'related_stations',
+ 'count(*) as count',
+ {
+ backend_id => $backend->{id},
+ }
+ )->hash->{count}
+ ]
+ );
+ }
+
+ push(
+ @traewelling,
+ query_to_influx(
+ 'pull_user_count',
+ $db->select(
+ 'traewelling',
+ 'count(*) as count',
+ { pull_sync => 1 }
+ )->hash->{count}
+ )
+ );
+ push(
+ @traewelling,
+ query_to_influx(
+ 'push_user_count',
+ $db->select(
+ 'traewelling',
+ 'count(*) as count',
+ { push_sync => 1 }
+ )->hash->{count}
+ )
+ );
+ push(
+ @stats,
+ query_to_influx(
+ 'polyline_ratio',
+ $db->query(
+'select (select count(polyline_id) from journeys)::float / (select count(*) from polylines) as ratio'
+ )->hash->{ratio}
+ )
+ );
+
+ if ( $self->app->mode eq 'development' ) {
+ $self->app->log->debug( 'POST '
+ . $self->app->config->{influxdb}->{url}
+ . ' stats '
+ . join( ',', @stats ) );
+ for my $backend_entry (@backend_stats) {
+ $self->app->log->debug(
+ 'POST '
+ . $self->app->config->{influxdb}->{url}
+ . ' stations,backend='
+ . $backend_entry->[0]
+ . sprintf(
+ ' count=%d,meta=%d',
+ $backend_entry->[1], $backend_entry->[2]
+ )
+ );
+ }
+ $self->app->log->debug( 'POST '
+ . $self->app->config->{influxdb}->{url}
+ . ' traewelling '
+ . join( ',', @traewelling ) );
+ }
+ elsif ( $self->app->config->{influxdb}->{url} ) {
+ $self->app->ua->post_p(
+ $self->app->config->{influxdb}->{url},
+ 'stats ' . join( ',', @stats )
+ )->wait;
+ my $buf = q{};
+ for my $backend_entry (@backend_stats) {
+ $buf
+ .= "\nstations,backend="
+ . $backend_entry->[0]
+ . sprintf( ' count=%d,meta=%d',
+ $backend_entry->[1], $backend_entry->[2] );
+ }
+ $self->app->ua->post_p( $self->app->config->{influxdb}->{url}, $buf )
+ ->wait;
+ $self->app->ua->post_p(
+ $self->app->config->{influxdb}->{url},
+ 'traewelling ' . join( ',', @traewelling )
+ )->wait;
+ }
+ else {
+ $self->app->log->warn(
+ "influxdb command called, but no influxdb url has been configured");
+ }
+
+ return;
+}
+
+1;
+
+__END__
+
+=head1 SYNOPSIS
+
+ Usage: index.pl influxdb
+
+ Write statistics to InfluxDB
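+
+ Statistics are sent to the "influxdb" URL configured for this travelynx
+ instance; in development mode, the line protocol payload is logged instead
+ of being submitted.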
diff --git a/lib/Travelynx/Command/integritycheck.pm b/lib/Travelynx/Command/integritycheck.pm
new file mode 100644
index 0000000..be5fe71
--- /dev/null
+++ b/lib/Travelynx/Command/integritycheck.pm
@@ -0,0 +1,173 @@
+package Travelynx::Command::integritycheck;
+
+# Copyright (C) 2022 Birte Kristina Friesel
+#
+# SPDX-License-Identifier: AGPL-3.0-or-later
+
+use Mojo::Base 'Mojolicious::Command';
+use List::Util qw();
+use Travel::Status::DE::IRIS::Stations;
+
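+# Supported modes: 'unknown-evas', 'unknown-route-entries',
+# 'checkout-eva-vs-route-eva', or 'all' to run every check.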
+sub run {
+ my ( $self, $mode ) = @_;
+ my $found = 0;
+ my $db = $self->app->pg->db;
+
+ if ( $mode eq 'all' or $mode eq 'unknown-evas' ) {
+
+ my %notified;
+ my $res1 = $db->query(
+ qq{
+ select checkin_station_id
+ from journeys
+ left join stations on journeys.checkin_station_id = stations.eva
+ where stations.eva is null;
+ }
+ );
+ my $res2 = $db->query(
+ qq{
+ select checkout_station_id
+ from journeys
+ left join stations on journeys.checkout_station_id = stations.eva
+ where stations.eva is null;
+ }
+ );
+
+ while ( my $row = $res1->hash ) {
+ my $eva = $row->{checkin_station_id};
+ if ( not $found ) {
+ $found = 1;
+ say
+'Journeys in the travelynx database contain the following unknown EVA IDs.';
+ say '------------8<----------';
+ say 'Travel::Status::DE::IRIS v'
+ . $Travel::Status::DE::IRIS::Stations::VERSION;
+ }
+ if ( not $notified{$eva} ) {
+ say $eva;
+ $notified{$eva} = 1;
+ }
+ }
+
+ while ( my $row = $res2->hash ) {
+ my $eva = $row->{checkout_station_id};
+ if ( not $found ) {
+ $found = 1;
+ say
+'Journeys in the travelynx database contain the following unknown EVA IDs.';
+ say '------------8<----------';
+ say 'Travel::Status::DE::IRIS v'
+ . $Travel::Status::DE::IRIS::Stations::VERSION;
+ }
+ if ( not $notified{$eva} ) {
+ say $eva;
+ $notified{$eva} = 1;
+ }
+ }
+ }
+
+ if ($found) {
+ say '------------8<----------';
+ say '';
+ $found = 0;
+ }
+
+ if ( $mode eq 'all' or $mode eq 'unknown-route-entries' ) {
+
+ my %notified;
+ my $rename = $self->app->renamed_station;
+ my $res = $db->select( 'journeys', [ 'route', 'edited' ] )->expand;
+
+ while ( my $j = $res->hash ) {
+ if ( $j->{edited} & 0x0010 ) {
+ next;
+ }
+ my @stops = @{ $j->{route} // [] };
+ for my $stop (@stops) {
+ my $stop_name = $stop->[0];
+ if ( $rename->{ $stop->[0] } ) {
+ $stop->[0] = $rename->{ $stop->[0] };
+ }
+ }
+ my @unknown
+ = $self->app->stations->grep_unknown( map { $_->[0] } @stops );
+ for my $stop_name (@unknown) {
+ if ( not $notified{$stop_name} ) {
+ if ( not $found ) {
+ say
+'Journeys in the travelynx database contain the following unknown route entries.';
+ say
+ 'Note that this check ignores manual route entries.';
+ say
+'All reports refer to routes obtained via HAFAS/IRIS.';
+ say '------------8<----------';
+ say 'Travel::Status::DE::IRIS v'
+ . $Travel::Status::DE::IRIS::Stations::VERSION;
+ $found = 1;
+ }
+ say $stop_name;
+ $notified{$stop_name} = 1;
+ }
+ }
+ }
+ }
+
+ if ($found) {
+ say '------------8<----------';
+ say '';
+ $found = 0;
+ }
+
+ if ( $mode eq 'all' or $mode eq 'checkout-eva-vs-route-eva' ) {
+
+ my $res = $db->select(
+ 'journeys_str',
+ [ 'journey_id', 'sched_arr_ts', 'route', 'arr_name', 'arr_eva' ],
+ { backend_id => 0 }
+ )->expand;
+
+ journey: while ( my $j = $res->hash ) {
+ my $found_in_route;
+ my $found_arr;
+ for my $stop ( @{ $j->{route} // [] } ) {
+ if ( not $stop->[1] ) {
+ next journey;
+ }
+ if ( $stop->[1] == $j->{arr_eva} ) {
+ $found_in_route = 1;
+ last;
+ }
+ if ( $stop->[2]{sched_arr}
+ and $j->{sched_arr_ts}
+ and $stop->[2]{sched_arr} == int( $j->{sched_arr_ts} ) )
+ {
+ $found_arr = $stop;
+ }
+ }
+ if ( $found_arr and not $found_in_route ) {
+ if ( not $found ) {
+ say q{};
+ say
+'The following journeys have route entries that do not agree with the checkout EVA ID.';
+ say
+'checkout station ID (left) vs route entry with matching checkout time (right)';
+ say '------------8<----------';
+ $found = 1;
+ }
+ printf(
+ "%7d %d (%s) vs %d (%s)\n",
+ $j->{journey_id}, $j->{arr_eva}, $j->{arr_name},
+ $found_arr->[1], $found_arr->[0]
+ );
+ }
+ }
+ }
+
+ if ($found) {
+ say '------------8<----------';
+ say '';
+ $found = 0;
+ }
+}
+
+1;
diff --git a/lib/Travelynx/Command/maintenance.pm b/lib/Travelynx/Command/maintenance.pm
index 5cbf982..7baf762 100644
--- a/lib/Travelynx/Command/maintenance.pm
+++ b/lib/Travelynx/Command/maintenance.pm
@@ -1,4 +1,8 @@
package Travelynx::Command::maintenance;
+
+# Copyright (C) 2020-2023 Birte Kristina Friesel
+#
+# SPDX-License-Identifier: AGPL-3.0-or-later
use Mojo::Base 'Mojolicious::Command';
use DateTime;
@@ -10,22 +14,15 @@ has usage => sub { shift->extract_usage };
sub run {
my ( $self, $filename ) = @_;
- my $now = DateTime->now( time_zone => 'Europe/Berlin' );
- my $checkin_deadline = $now->clone->subtract( hours => 48 );
- my $verification_deadline = $now->clone->subtract( hours => 48 );
- my $deletion_deadline = $now->clone->subtract( hours => 72 );
- my $old_deadline = $now->clone->subtract( years => 1 );
+ my $now = DateTime->now( time_zone => 'Europe/Berlin' );
+ my $verification_deadline = $now->clone->subtract( hours => 48 );
+ my $deletion_deadline = $now->clone->subtract( hours => 72 );
+ my $old_deadline = $now->clone->subtract( years => 1 );
+ my $old_notification_deadline = $now->clone->subtract( weeks => 4 );
my $db = $self->app->pg->db;
my $tx = $db->begin;
- my $res = $db->delete( 'in_transit',
- { checkin_time => { '<', $checkin_deadline } } );
-
- if ( my $rows = $res->rows ) {
- printf( "Removed %d incomplete checkins\n", $rows );
- }
-
my $unverified = $db->select(
'users',
'id, email, extract(epoch from registered_at) as registered_ts',
@@ -72,7 +69,7 @@ sub run {
printf( "Pruned unverified user %d\n", $user->{id} );
}
- $res = $db->delete( 'pending_passwords',
+ my $res = $db->delete( 'pending_passwords',
{ requested_at => { '<', $verification_deadline } } );
if ( my $rows = $res->rows ) {
@@ -86,12 +83,40 @@ sub run {
printf( "Pruned %d pending mail change(s)\n", $rows );
}
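+ # Users who have not been seen for more than a year are notified about the
+ # upcoming deletion of their account. The account is deleted by a later
+ # maintenance run once at least four weeks have passed since the
+ # notification, unless the user has logged in again in the meantime.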
+ my $to_notify = $db->select(
+ 'users',
+ [ 'id', 'name', 'email', 'last_seen' ],
+ {
+ last_seen => { '<', $old_deadline },
+ deletion_notified => undef
+ }
+ );
+
+ for my $user ( $to_notify->hashes->each ) {
+ say "Sending account deletion notification to uid $user->{id}...";
+ $self->app->sendmail->age_deletion_notification(
+ name => $user->{name},
+ email => $user->{email},
+ last_seen => $user->{last_seen},
+ login_url => $self->app->base_url_for('login')->to_abs,
+ account_url => $self->app->base_url_for('account')->to_abs,
+ imprint_url => $self->app->base_url_for('impressum')->to_abs,
+ );
+ $self->app->users->mark_deletion_notified( uid => $user->{id} );
+ }
+
my $to_delete = $db->select( 'users', ['id'],
{ deletion_requested => { '<', $deletion_deadline } } );
my @uids_to_delete = $to_delete->arrays->map( sub { shift->[0] } )->each;
- $to_delete
- = $db->select( 'users', ['id'], { last_seen => { '<', $old_deadline } } );
+ $to_delete = $db->select(
+ 'users',
+ ['id'],
+ {
+ last_seen => { '<', $old_deadline },
+ deletion_notified => { '<', $old_notification_deadline }
+ }
+ );
push( @uids_to_delete,
$to_delete->arrays->map( sub { shift->[0] } )->each );
@@ -101,30 +126,30 @@ sub run {
"About to delete %d accounts, which is quite a lot.\n",
scalar @uids_to_delete
);
+ for my $uid (@uids_to_delete) {
+ my $journeys_res = $db->select(
+ 'journeys',
+ 'count(*) as count',
+ { user_id => $uid }
+ )->hash;
+ printf STDERR (
+ " - UID %5d (%4d journeys)\n",
+ $uid, $journeys_res->{count}
+ );
+ }
say STDERR 'Aborting maintenance. Please investigate.';
exit(1);
}
for my $uid (@uids_to_delete) {
say "Deleting uid ${uid}...";
- my $tokens_res = $db->delete( 'tokens', { user_id => $uid } );
- my $stats_res = $db->delete( 'journey_stats', { user_id => $uid } );
- my $journeys_res = $db->delete( 'journeys', { user_id => $uid } );
- my $transit_res = $db->delete( 'in_transit', { user_id => $uid } );
- my $password_res
- = $db->delete( 'pending_passwords', { user_id => $uid } );
- my $user_res = $db->delete( 'users', { id => $uid } );
-
+ my $count = $self->app->users->delete(
+ uid => $uid,
+ db => $db,
+ in_transaction => 1
+ );
printf( " %d tokens, %d monthly stats, %d journeys\n",
- $tokens_res->rows, $stats_res->rows, $journeys_res->rows );
-
- if ( $user_res->rows != 1 ) {
- printf STDERR (
- "Deleted %d rows from users, expected 1. Rollback and abort.\n",
- $user_res->rows
- );
- exit(1);
- }
+ $count->{tokens}, $count->{stats}, $count->{journeys} );
}
$tx->commit;
diff --git a/lib/Travelynx/Command/munin.pm b/lib/Travelynx/Command/munin.pm
index ee509d3..3b6e393 100644
--- a/lib/Travelynx/Command/munin.pm
+++ b/lib/Travelynx/Command/munin.pm
@@ -1,4 +1,8 @@
package Travelynx::Command::munin;
+
+# Copyright (C) 2020-2023 Birte Kristina Friesel
+#
+# SPDX-License-Identifier: AGPL-3.0-or-later
use Mojo::Base 'Mojolicious::Command';
use DateTime;
@@ -11,7 +15,7 @@ sub query_to_munin {
my ( $label, $value ) = @_;
if ( defined $value ) {
- printf( "%s.value %d\n", $label, $value );
+ printf( "%s.value %f\n", $label, $value );
}
}
@@ -26,6 +30,19 @@ sub run {
my $checkin_window_query
= qq{select count(*) as count from journeys where checkin_time > to_timestamp(?);};
+ # DateTime's math does not like time zones: When subtracting 7 days from
+ # sun 2am and the previous sunday was the switch from CET to CEST (i.e.,
+ # the switch to daylight saving time), the resulting datetime is invalid.
+ # This is a fatal error. We avoid this edge case by performing date math
+ # on the epoch timestamp, which does not know or care about time zones and
+ # daylight saving time.
+ my $one_day = 24 * 60 * 60;
+ my $one_week = 7 * $one_day;
+ my $one_month = 30 * $one_day;
+
+ query_to_munin( 'pending_user_count',
+ $db->select( 'users', 'count(*) as count', { status => 0 } )
+ ->hash->{count} );
query_to_munin( 'reg_user_count',
$db->select( 'users', 'count(*) as count', { status => 1 } )
->hash->{count} );
@@ -42,19 +59,28 @@ sub run {
);
query_to_munin( 'checked_in',
$db->select( 'in_transit', 'count(*) as count' )->hash->{count} );
- query_to_munin(
- 'checkins_24h',
- $db->query( $checkin_window_query,
- $now->subtract( hours => 24 )->epoch )->hash->{count}
- );
+ query_to_munin( 'checkins_24h',
+ $db->query( $checkin_window_query, $now->epoch - $one_day )
+ ->hash->{count} );
query_to_munin( 'checkins_7d',
- $db->query( $checkin_window_query, $now->subtract( days => 7 )->epoch )
+ $db->query( $checkin_window_query, $now->epoch - $one_week )
+ ->hash->{count} );
+ query_to_munin( 'checkins_30d',
+ $db->query( $checkin_window_query, $now->epoch - $one_month )
+ ->hash->{count} );
+ query_to_munin( 'polylines',
+ $db->select( 'polylines', 'count(*) as count' )->hash->{count} );
+ query_to_munin( 'traewelling_pull',
+ $db->select( 'traewelling', 'count(*) as count', { pull_sync => 1 } )
+ ->hash->{count} );
+ query_to_munin( 'traewelling_push',
+ $db->select( 'traewelling', 'count(*) as count', { push_sync => 1 } )
->hash->{count} );
query_to_munin(
- 'checkins_30d',
+ 'polyline_ratio',
$db->query(
- $checkin_window_query, $now->subtract( days => 30 )->epoch
- )->hash->{count}
+'select (select count(polyline_id) from journeys)::float / (select count(*) from polylines) as ratio'
+ )->hash->{ratio}
);
}
diff --git a/lib/Travelynx/Command/traewelling.pm b/lib/Travelynx/Command/traewelling.pm
new file mode 100644
index 0000000..e4e0134
--- /dev/null
+++ b/lib/Travelynx/Command/traewelling.pm
@@ -0,0 +1,239 @@
+package Travelynx::Command::traewelling;
+
+# Copyright (C) 2023 Birte Kristina Friesel
+#
+# SPDX-License-Identifier: AGPL-3.0-or-later
+use Mojo::Base 'Mojolicious::Command';
+use Mojo::Promise;
+
+use DateTime;
+use JSON;
+use List::Util;
+
+has description => 'Synchronize with Traewelling';
+
+has usage => sub { shift->extract_usage };
+
+sub pull_sync {
+ my ($self) = @_;
+ my %pull_result;
+ my $request_count = 0;
+ for my $account_data ( $self->app->traewelling->get_pull_accounts ) {
+
+ if ( -e 'maintenance' ) {
+ $self->app->log->debug(
+ 'traewelling: "maintenance" file found, aborting');
+ return;
+ }
+
+ my $in_transit = $self->app->in_transit->get(
+ uid => $account_data->{user_id},
+ );
+ if ($in_transit) {
+ $self->app->log->debug(
+"Skipping Traewelling status pull for UID $account_data->{user_id}: already checked in"
+ );
+ next;
+ }
+
+ if ( not defined $account_data->{data}{user_name} ) {
+ $self->app->log->debug(
+"travelynx user $account_data->{user_id} has a Traewelling connection, but no username"
+ );
+ next;
+ }
+
+ # $account_data->{user_id} is the travelynx uid
+ # $account_data->{user_name} is the Träwelling username
+ $request_count += 1;
+ $self->app->log->debug(
+"Scheduling Traewelling status pull for UID $account_data->{user_id}"
+ );
+
+ # In 'work', the event loop is not running,
+ # so there's no need to multiply by $request_count at the moment
+ Mojo::Promise->timer(1.5)->then(
+ sub {
+ return $self->app->traewelling_api->get_status_p(
+ username => $account_data->{data}{user_name},
+ token => $account_data->{token}
+ );
+ }
+ )->then(
+ sub {
+ my ($traewelling) = @_;
+ $pull_result{ $traewelling->{http} } += 1;
+ return $self->app->traewelling_to_travelynx_p(
+ traewelling => $traewelling,
+ user_data => $account_data
+ );
+ }
+ )->catch(
+ sub {
+ my ($err) = @_;
+ $pull_result{ $err->{http} // 0 } += 1;
+ $self->app->traewelling->log(
+ uid => $account_data->{user_id},
+ message => "Fehler bei der Status-Abfrage: $err->{text}",
+ is_error => 1
+ );
+ $self->app->log->debug("Error $err->{text}");
+ }
+ )->wait;
+ }
+
+ return \%pull_result;
+}
+
+sub push_sync {
+ my ($self) = @_;
+ my %push_result;
+
+ for my $candidate ( $self->app->traewelling->get_pushable_accounts ) {
+
+ if ( -e 'maintenance' ) {
+ $self->app->log->debug(
+ 'traewelling: "maintenance" file found, aborting');
+ return;
+ }
+
+ $self->app->log->debug(
+ "Pushing to Traewelling for UID $candidate->{uid}");
+ my $trip_id = $candidate->{journey_data}{trip_id};
+ if ( not $trip_id ) {
+ $self->app->log->debug("... trip_id is missing");
+ $self->app->traewelling->log(
+ uid => $candidate->{uid},
+ message =>
+"Konnte $candidate->{train_type} $candidate->{train_no} nicht übertragen: Keine trip_id vorhanden",
+ is_error => 1
+ );
+ next;
+ }
+ if ( $candidate->{data}{latest_push_ts}
+ and $candidate->{data}{latest_push_ts} == $candidate->{checkin_ts} )
+ {
+ $self->app->log->debug("... already handled");
+ next;
+ }
+ $self->app->traewelling_api->checkin_p( %{$candidate},
+ trip_id => $trip_id )->then(
+ sub {
+ my ($status) = @_;
+ $push_result{ $status->{http} } += 1;
+ }
+ )->catch(
+ sub {
+ my ($status) = @_;
+ $push_result{ $status->{http} // 0 } += 1;
+ }
+ )->wait;
+ }
+
+ return \%push_result;
+}
+
+sub run {
+ my ( $self, $direction ) = @_;
+
+ my $now = DateTime->now( time_zone => 'Europe/Berlin' );
+ my $started_at = $now;
+ my $push_result;
+ my $pull_result;
+
+ if ( -e 'maintenance' ) {
+ $self->app->log->debug(
+ 'traewelling: "maintenance" file found, aborting');
+ return;
+ }
+
+ if ( not $direction or $direction eq 'push' ) {
+ $push_result = $self->push_sync;
+ }
+
+ my $trwl_push_finished_at = DateTime->now( time_zone => 'Europe/Berlin' );
+
+ if ( not $direction or $direction eq 'pull' ) {
+ $pull_result = $self->pull_sync;
+ }
+
+ my $trwl_pull_finished_at = DateTime->now( time_zone => 'Europe/Berlin' );
+
+ if ( -e 'maintenance' ) {
+ $self->app->log->debug(
+ 'traewelling: "maintenance" file found, aborting');
+ return;
+ }
+
+ my $trwl_push_duration = $trwl_push_finished_at->epoch - $started_at->epoch;
+ my $trwl_pull_duration
+ = $trwl_pull_finished_at->epoch - $trwl_push_finished_at->epoch;
+ my $trwl_duration = $trwl_pull_finished_at->epoch - $started_at->epoch;
+
+ if ( $self->app->config->{influxdb}->{url} ) {
+ my $report = "sync_runtime_seconds=${trwl_duration}";
+ if ( not $direction or $direction eq 'push' ) {
+ $report .= ",push_runtime_seconds=${trwl_push_duration}";
+ }
+ if ( not $direction or $direction eq 'pull' ) {
+ $report .= ",pull_runtime_seconds=${trwl_pull_duration}";
+ }
+ if ( $self->app->mode eq 'development' ) {
+ $self->app->log->debug( 'POST '
+ . $self->app->config->{influxdb}->{url}
+ . " traewelling ${report}" );
+ }
+ else {
+ $self->app->ua->post_p( $self->app->config->{influxdb}->{url},
+ "traewelling ${report}" )->wait;
+ }
+
+ if ($push_result) {
+ for my $status ( keys %{$push_result} ) {
+ my $count = $push_result->{$status};
+ if ( $self->app->mode eq 'development' ) {
+ $self->app->log->debug( 'POST '
+ . $self->app->config->{influxdb}->{url}
+ . " traewelling_push,http=$status count=$count" );
+ }
+ else {
+ $self->app->ua->post_p(
+ $self->app->config->{influxdb}->{url},
+ "traewelling_push,http=$status count=$count"
+ )->wait;
+ }
+ }
+ }
+
+ if ($pull_result) {
+ for my $status ( keys %{$pull_result} ) {
+ my $count = $pull_result->{$status};
+ if ( $self->app->mode eq 'development' ) {
+ $self->app->log->debug( 'POST '
+ . $self->app->config->{influxdb}->{url}
+ . " traewelling_pull,http=$status count=$count" );
+ }
+ else {
+ $self->app->ua->post_p(
+ $self->app->config->{influxdb}->{url},
+ "traewelling_pull,http=$status count=$count"
+ )->wait;
+ }
+ }
+ }
+ }
+}
+
+1;
+
+__END__
+
+=head1 SYNOPSIS
+
+ Usage: index.pl traewelling [direction]
+
+ Performs both push and pull synchronization by default.
+ If "direction" is specified, only synchronizes in the specified direction
+ ("push" or "pull").
+
+ Should be called from a cronjob every three to ten minutes.
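+
+ For example, via a crontab entry (interval and working directory are
+ illustrative):
+
+ */5 * * * * cd /path/to/travelynx && perl index.pl traewelling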
diff --git a/lib/Travelynx/Command/work.pm b/lib/Travelynx/Command/work.pm
index fbbf958..071befa 100644
--- a/lib/Travelynx/Command/work.pm
+++ b/lib/Travelynx/Command/work.pm
@@ -1,150 +1,771 @@
package Travelynx::Command::work;
+
+# Copyright (C) 2020-2023 Birte Kristina Friesel
+# Copyright (C) 2025 networkException <git@nwex.de>
+#
+# SPDX-License-Identifier: AGPL-3.0-or-later
use Mojo::Base 'Mojolicious::Command';
+use Mojo::Promise;
+
+use utf8;
use DateTime;
use JSON;
use List::Util;
-has description =>
- 'Perform automatic checkout when users arrive at their destination';
+has description => 'Update real-time data of active journeys';
has usage => sub { shift->extract_usage };
sub run {
- my ($self) = @_;
+ my ( $self, $backend ) = @_;
+
+ my $now = DateTime->now( time_zone => 'Europe/Berlin' );
+ my $checkin_deadline = $now->clone->subtract( hours => 48 );
+ my $json = JSON->new;
- my $now = DateTime->now( time_zone => 'Europe/Berlin' );
- my $json = JSON->new;
+ if ( -e 'maintenance' ) {
+ $self->app->log->debug('work: "maintenance" file found, aborting');
+ return;
+ }
+
+ my $num_incomplete = $self->app->in_transit->delete_incomplete_checkins(
+ earlier_than => $checkin_deadline );
- my $db = $self->app->pg->db;
+ if ($num_incomplete) {
+ $self->app->log->debug("Removed ${num_incomplete} incomplete checkins");
+ }
- for my $entry (
- $db->select( 'in_transit_str', '*', { cancelled => 0 } )->hashes->each )
- {
+ my $errors = 0;
+ my $backend_issues = 0;
+ my $rate_limit_counts = 0;
+ my $dbris_rate_limited = 0;
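+	# These counters feed the InfluxDB report (if configured) after the main loop.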
+
+ for my $entry ( $self->app->in_transit->get_all_active ) {
+
+ if ( -e 'maintenance' ) {
+ $self->app->log->debug('work: "maintenance" file found, aborting');
+ return;
+ }
my $uid = $entry->{user_id};
my $dep = $entry->{dep_eva};
my $arr = $entry->{arr_eva};
my $train_id = $entry->{train_id};
- # Note: IRIS data is not always updated in real-time. Both departure and
- # arrival delays may take several minutes to appear, especially in case
- # of large-scale disturbances. We work around this by continuing to
- # update departure data for up to 15 minutes after departure and
- # delaying automatic checkout by at least 10 minutes.
+ if ( $train_id eq 'manual'
+ and ( not $backend or $backend eq 'manual' ) )
+ {
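+			# Manual checkins have no real-time data to refresh; just check
+			# out ten minutes after the recorded arrival.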
+ if ( $arr
+ and $entry->{real_arr_ts}
+ and $now->epoch - $entry->{real_arr_ts} > 600 )
+ {
+ $self->app->checkout_p(
+ station => $arr,
+ force => 2,
+ dep_eva => $dep,
+ arr_eva => $arr,
+ uid => $uid
+ )->wait;
+ }
+ }
- eval {
- if ( $now->epoch - $entry->{real_dep_ts} < 900 ) {
- my $status = $self->app->get_departures( $dep, 30, 30 );
- if ( $status->{errstr} ) {
- die("get_departures($dep): $status->{errstr}\n");
- }
+ elsif ( $entry->{is_dbris} and ( not $backend or $backend eq 'dbris' ) )
+ {
- my ($train) = List::Util::first { $_->train_id eq $train_id }
- @{ $status->{results} };
+ eval {
- if ( not $train ) {
- die("could not find train $train_id at $dep\n");
- }
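+				# Throttle DBRIS polling: back off to 4.5s after an HTTP 429
+				# (see the catch handler below), otherwise wait roughly one
+				# second between requests.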
+ Mojo::Promise->timer(
+ $dbris_rate_limited ? 4.5 : ( $backend ? 1.2 : 1.0 ) )
+ ->then(
+ sub {
+ return $self->app->dbris->get_journey_p(
+ trip_id => $train_id );
+ }
+ )->then(
+ sub {
+ my ($journey) = @_;
+
+ $dbris_rate_limited = 0;
+
+ my $found_dep;
+ my $found_arr;
+ for my $stop ( $journey->route ) {
+ if ( $stop->eva == $dep ) {
+ $found_dep = $stop;
+ }
+ if ( $arr and $stop->eva == $arr ) {
+ $found_arr = $stop;
+ last;
+ }
+ }
+ if ( not $found_dep ) {
+ $self->app->log->debug(
+ "Did not find $dep within journey $train_id");
+ return;
+ }
- $db->update(
- 'in_transit',
- {
- dep_platform => $train->platform,
- real_departure => $train->departure,
- route =>
- $json->encode( [ $self->app->route_diff($train) ] ),
- messages => $json->encode(
- [
- map { [ $_->[0]->epoch, $_->[1] ] }
- $train->messages
- ]
- ),
- },
- { user_id => $uid }
- );
- $self->app->add_route_timestamps( $uid, $train, 1 );
+ if ( $found_dep->rt_dep ) {
+ $self->app->in_transit->update_departure_dbris(
+ uid => $uid,
+ journey => $journey,
+ stop => $found_dep,
+ dep_eva => $dep,
+ arr_eva => $arr,
+ train_id => $train_id,
+ );
+ }
+ if ( $found_dep->sched_dep
+ and $found_dep->dep->epoch > $now->epoch )
+ {
+ $self->app->add_wagonorder(
+ uid => $uid,
+ train_id => $train_id,
+ is_departure => 1,
+ eva => $dep,
+ datetime => $found_dep->sched_dep,
+ train_type => $journey->type,
+ train_no => $journey->number,
+ );
+ $self->app->add_stationinfo( $uid, 1,
+ $train_id, $found_dep->eva );
+ }
+
+ if (
+ $found_arr
+ and
+ ( $found_arr->rt_arr or $found_arr->is_cancelled )
+ )
+ {
+ $self->app->in_transit->update_arrival_dbris(
+ uid => $uid,
+ journey => $journey,
+ train_id => $train_id,
+ stop => $found_arr,
+ dep_eva => $dep,
+ arr_eva => $arr
+ );
+ }
+ if ( $found_arr and $found_arr->rt_arr ) {
+ if ( $found_arr->arr->epoch - $now->epoch < 600 ) {
+ $self->app->add_wagonorder(
+ uid => $uid,
+ train_id => $train_id,
+ is_arrival => 1,
+ eva => $arr,
+ datetime => $found_arr->sched_dep,
+ train_type => $journey->type,
+ train_no => $journey->number,
+ );
+ $self->app->add_stationinfo( $uid, 0,
+ $train_id, $found_dep->eva,
+ $found_arr->eva );
+ }
+ }
+ if ( $found_arr and $found_arr->is_cancelled ) {
+
+ # check out (adds a cancelled journey and resets journey state
+ # to destination selection)
+ $self->app->checkout_p(
+ station => $arr,
+ force => 0,
+ dep_eva => $dep,
+ arr_eva => $arr,
+ uid => $uid
+ )->wait;
+ }
+ }
+ )->catch(
+ sub {
+ my ($err) = @_;
+ $self->app->log->debug(
+"work($uid) @ DBRIS $entry->{backend_name}: journey: $err"
+ );
+ if ( $err =~ m{HTTP 429} ) {
+ $dbris_rate_limited = 1;
+ $rate_limit_counts += 1;
+ }
+ else {
+ $backend_issues += 1;
+ }
+ }
+ )->wait;
+
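+			# Force a checkout ten minutes after the real-time arrival,
+			# mirroring the behaviour of the other backends below.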
+ if ( $arr
+ and $entry->{real_arr_ts}
+ and $now->epoch - $entry->{real_arr_ts} > 600 )
+ {
+ $self->app->checkout_p(
+ station => $arr,
+ force => 2,
+ dep_eva => $dep,
+ arr_eva => $arr,
+ uid => $uid
+ )->wait;
+ }
+ };
+ if ($@) {
+ $errors += 1;
+ $self->app->log->error(
+ "work($uid) @ DBRIS $entry->{backend_name}: $@");
}
- };
- if ($@) {
- $self->app->log->error("work($uid)/departure: $@");
}
- eval {
- if (
- $arr
- and ( not $entry->{real_arr_ts}
- or $now->epoch - $entry->{real_arr_ts} < 600 )
- )
- {
- my $status = $self->app->get_departures( $arr, 20, 220 );
- if ( $status->{errstr} ) {
- die("get_departures($arr): $status->{errstr}\n");
+ elsif ( $entry->{is_efa} and ( not $backend or $backend eq 'efa' ) ) {
+ eval {
+ $self->app->efa->get_journey_p(
+ trip_id => $train_id,
+ service => $entry->{backend_name}
+ )->then(
+ sub {
+ my ($journey) = @_;
+
+ my $found_dep;
+ my $found_arr;
+ for my $stop ( $journey->route ) {
+ if ( $stop->id_num == $dep ) {
+ $found_dep = $stop;
+ }
+ if ( $arr and $stop->id_num == $arr ) {
+ $found_arr = $stop;
+ last;
+ }
+ }
+ if ( not $found_dep ) {
+ $self->app->log->debug(
+ "Did not find $dep within journey $train_id");
+ return;
+ }
+
+ if ( $found_dep->rt_dep ) {
+ $self->app->in_transit->update_departure_efa(
+ uid => $uid,
+ journey => $journey,
+ stop => $found_dep,
+ dep_eva => $dep,
+ arr_eva => $arr,
+ trip_id => $train_id,
+ );
+ }
+
+ if (
+ $found_arr
+ and
+ ( $found_arr->rt_arr or $found_arr->is_cancelled )
+ )
+ {
+ $self->app->in_transit->update_arrival_efa(
+ uid => $uid,
+ journey => $journey,
+ stop => $found_arr,
+ dep_eva => $dep,
+ arr_eva => $arr,
+ trip_id => $train_id,
+ );
+ }
+ if ( $found_arr and $found_arr->is_cancelled ) {
+
+ # check out (adds a cancelled journey and resets journey state
+ # to destination selection)
+ $self->app->checkout_p(
+ station => $arr,
+ force => 0,
+ dep_eva => $dep,
+ arr_eva => $arr,
+ uid => $uid
+ )->wait;
+ }
+ }
+ )->catch(
+ sub {
+ my ($err) = @_;
+ $backend_issues += 1;
+ $self->app->log->error(
+"work($uid) @ EFA $entry->{backend_name}: journey: $err"
+ );
+ }
+ )->wait;
+
+ if ( $arr
+ and $entry->{real_arr_ts}
+ and $now->epoch - $entry->{real_arr_ts} > 600 )
+ {
+ $self->app->checkout_p(
+ station => $arr,
+ force => 2,
+ dep_eva => $dep,
+ arr_eva => $arr,
+ uid => $uid
+ )->wait;
}
+ };
+ if ($@) {
+ $errors += 1;
+ $self->app->log->error(
+ "work($uid) @ EFA $entry->{backend_name}: $@");
+ }
+ }
+
+ elsif ( $entry->{is_motis} and ( not $backend or $backend eq 'motis' ) )
+ {
+
+ eval {
+ $self->app->motis->get_trip_p(
+ service => $entry->{backend_name},
+ trip_id => $train_id,
+ )->then(
+ sub {
+ my ($journey) = @_;
+
+ for my $stopover ( $journey->stopovers ) {
+ if ( not defined $stopover->stop->{eva} ) {
+
+ # Looks like MOTIS / transitous station IDs can change after the fact.
+ # So let's be safe rather than sorry, even if this causes way too many calls to the slow path
+ # (Stations::get_by_external_id uses string lookups).
+ # This function call implicitly sets $stopover->stop->{eva} for MOTIS backends.
+ $self->app->stations->add_or_update(
+ stop => $stopover->stop,
+ motis => $entry->{backend_name},
+ );
+
+ $self->app->log->debug( "mapped "
+ . $stopover->stop->id . " to "
+ . $stopover->stop->{eva} );
+ }
+ }
+
+ my $found_departure;
+ my $found_arrival;
+ for my $stopover ( $journey->stopovers ) {
+ if ( $stopover->stop->{eva} == $dep ) {
+ $found_departure = $stopover;
+ }
+
+ if ( $arr and $stopover->stop->{eva} == $arr ) {
+ $found_arrival = $stopover;
+ last;
+ }
+ }
+
+ if ( not $found_departure ) {
+ $self->app->log->debug(
+ "Did not find $dep within trip $train_id");
+ return;
+ }
- # Note that a train may pass the same station several times.
- # Notable example: S41 / S42 ("Ringbahn") both starts and
- # terminates at Berlin Südkreuz
- my ($train) = List::Util::first {
- $_->train_id eq $train_id
- and $_->sched_arrival
- and $_->sched_arrival->epoch > $entry->{sched_dep_ts}
+ if ( $found_departure->realtime_departure ) {
+ $self->app->in_transit->update_departure_motis(
+ uid => $uid,
+ journey => $journey,
+ stopover => $found_departure,
+ dep_eva => $dep,
+ arr_eva => $arr,
+ train_id => $train_id,
+ );
+ }
+
+ if ( $found_arrival
+ and $found_arrival->realtime_arrival )
+ {
+ $self->app->in_transit->update_arrival_motis(
+ uid => $uid,
+ journey => $journey,
+ train_id => $train_id,
+ stopover => $found_arrival,
+ dep_eva => $dep,
+ arr_eva => $arr
+ );
+ }
+ }
+ )->catch(
+ sub {
+ my ($err) = @_;
+ $self->app->log->error(
+"work($uid) @ MOTIS $entry->{backend_name}: journey: $err"
+ );
+ }
+ )->wait;
+
+ if ( $arr
+ and $entry->{real_arr_ts}
+ and $now->epoch - $entry->{real_arr_ts} > 600 )
+ {
+ $self->app->checkout_p(
+ station => $arr,
+ force => 2,
+ dep_eva => $dep,
+ arr_eva => $arr,
+ uid => $uid
+ )->wait;
}
- @{ $status->{results} };
+ };
+ if ($@) {
+ $errors += 1;
+ $self->app->log->error(
+ "work($uid) @ MOTIS $entry->{backend_name}: $@");
+ }
+ }
+
+ elsif ( $entry->{is_hafas} and ( not $backend or $backend eq 'hafas' ) )
+ {
+
+ eval {
- $train //= List::Util::first { $_->train_id eq $train_id }
- @{ $status->{results} };
+ $self->app->hafas->get_journey_p(
+ trip_id => $train_id,
+ service => $entry->{backend_name}
+ )->then(
+ sub {
+ my ($journey) = @_;
- if ( not $train ) {
+ my $found_dep;
+ my $found_arr;
+ for my $stop ( $journey->route ) {
+ if ( $stop->loc->eva == $dep ) {
+ $found_dep = $stop;
+ }
+ if ( $arr and $stop->loc->eva == $arr ) {
+ $found_arr = $stop;
+ last;
+ }
+ }
+ if ( not $found_dep ) {
+ $self->app->log->debug(
+ "Did not find $dep within journey $train_id");
+ return;
+ }
- # If we haven't seen the train yet, its arrival is probably
- # too far in the future. This is not critical.
- return;
+ if ( $found_dep->rt_dep ) {
+ $self->app->in_transit->update_departure_hafas(
+ uid => $uid,
+ journey => $journey,
+ stop => $found_dep,
+ dep_eva => $dep,
+ arr_eva => $arr
+ );
+ }
+ if (
+ $found_dep->sched_dep
+ and ( $entry->{backend_id} <= 1
+ or $entry->{backend_name} eq 'VRN'
+ or $entry->{backend_name} eq 'ÖBB' )
+ and $journey->class <= 16
+ and $found_dep->dep->epoch > $now->epoch
+ )
+ {
+ $self->app->add_wagonorder(
+ uid => $uid,
+ train_id => $journey->id,
+ is_departure => 1,
+ eva => $dep,
+ datetime => $found_dep->sched_dep,
+ train_type => $journey->type =~ s{ +$}{}r,
+ train_no => $journey->number,
+ );
+ $self->app->add_stationinfo( $uid, 1,
+ $journey->id, $found_dep->loc->eva );
+ }
+
+ if ( $found_arr and $found_arr->rt_arr ) {
+ $self->app->in_transit->update_arrival_hafas(
+ uid => $uid,
+ journey => $journey,
+ stop => $found_arr,
+ dep_eva => $dep,
+ arr_eva => $arr
+ );
+ if (
+ (
+ $entry->{backend_id} <= 1
+ or $entry->{backend_name} eq 'VRN'
+ or $entry->{backend_name} eq 'ÖBB'
+ )
+ and $journey->class <= 16
+ and $found_arr->arr->epoch - $now->epoch < 600
+ )
+ {
+ $self->app->add_wagonorder(
+ uid => $uid,
+ train_id => $journey->id,
+ is_arrival => 1,
+ eva => $arr,
+ datetime => $found_arr->sched_dep,
+ train_type => $journey->type,
+ train_no => $journey->number,
+ );
+ $self->app->add_stationinfo( $uid, 0,
+ $journey->id, $found_dep->loc->eva,
+ $found_arr->loc->eva );
+ }
+ }
+ }
+ )->catch(
+ sub {
+ my ($err) = @_;
+ $backend_issues += 1;
+ if ( $err
+ =~ m{svcResL\[0\][.]err is (?:FAIL|PARAMETER)$}
+ or $err =~ m{timeout} )
+ {
+ # These are not actionable.
+ $self->app->log->debug(
+"work($uid) @ HAFAS $entry->{backend_name}: journey: $err"
+ );
+ }
+ else {
+ $self->app->log->error(
+"work($uid) @ HAFAS $entry->{backend_name}: journey: $err"
+ );
+ }
+ }
+ )->wait;
+
+ if ( $arr
+ and $entry->{real_arr_ts}
+ and $now->epoch - $entry->{real_arr_ts} > 600 )
+ {
+ $self->app->checkout_p(
+ station => $arr,
+ force => 2,
+ dep_eva => $dep,
+ arr_eva => $arr,
+ uid => $uid
+ )->wait;
}
+ };
+ if ($@) {
+ $errors += 1;
+ $self->app->log->error(
+ "work($uid) @ HAFAS $entry->{backend_name}: $@");
+ }
+ }
+
+		# TODO: there is a race condition somewhere around here where a new
+		# check-in (via HAFAS) gets overwritten with IRIS data.
+		# All of these updates really need sanity checks against the train id ...
+
+ # Note: IRIS data is not always updated in real-time. Both departure and
+ # arrival delays may take several minutes to appear, especially in case
+ # of large-scale disturbances. We work around this by continuing to
+ # update departure data for up to 15 minutes after departure and
+ # delaying automatic checkout by at least 10 minutes.
+
+ elsif ( $entry->{is_iris} and ( not $backend or $backend eq 'iris' ) ) {
+ eval {
+ if ( $now->epoch - $entry->{real_dep_ts} < 900 ) {
+ my $status = $self->app->iris->get_departures(
+ station => $dep,
+ lookbehind => 30,
+ lookahead => 30
+ );
+ if ( $status->{errstr} ) {
+ die("get_departures($dep): $status->{errstr}\n");
+ }
+
+ my ($train)
+ = List::Util::first { $_->train_id eq $train_id }
+ @{ $status->{results} };
+
+ if ( not $train ) {
+ $self->app->log->debug(
+ "could not find train $train_id at $dep\n");
+ return;
+ }
+
+ $self->app->in_transit->update_departure(
+ uid => $uid,
+ train => $train,
+ dep_eva => $dep,
+ arr_eva => $arr,
+ route => [ $self->app->iris->route_diff($train) ]
+ );
- $db->update(
- 'in_transit',
- {
- arr_platform => $train->platform,
- sched_arrival => $train->sched_arrival,
- real_arrival => $train->arrival,
- route =>
- $json->encode( [ $self->app->route_diff($train) ] ),
- messages => $json->encode(
- [
- map { [ $_->[0]->epoch, $_->[1] ] }
- $train->messages
- ]
- ),
- },
- { user_id => $uid }
- );
- $self->app->add_route_timestamps( $uid, $train, 0 );
+ if ( $train->departure_is_cancelled and $arr ) {
+ my $checked_in
+ = $self->app->in_transit->update_departure_cancelled(
+ uid => $uid,
+ train => $train,
+ dep_eva => $dep,
+ arr_eva => $arr,
+ );
+
+ # depending on the amount of users in transit, some time may
+ # have passed between fetching $entry from the database and
+ # now. Only check out if the user is still checked into this
+ # train.
+ if ($checked_in) {
+
+ # check out (adds a cancelled journey and resets journey state
+					# to check-in)
+ $self->app->checkout_p(
+ station => $arr,
+ force => 2,
+ dep_eva => $dep,
+ arr_eva => $arr,
+ uid => $uid
+ )->wait;
+ }
+ }
+ else {
+ $self->app->add_route_timestamps( $uid, $train, 1 );
+ $self->app->add_wagonorder(
+ uid => $uid,
+ train_id => $train->train_id,
+ is_departure => 1,
+ eva => $dep,
+ datetime => $train->sched_departure,
+ train_type => $train->type,
+ train_no => $train->train_no
+ );
+ $self->app->add_stationinfo( $uid, 1, $train->train_id,
+ $dep, $arr );
+ }
+ }
+ };
+ if ($@) {
+ $errors += 1;
+ $self->app->log->error("work($uid) @ IRIS: departure: $@");
}
- elsif ( $entry->{real_arr_ts} ) {
- my ( undef, $error ) = $self->app->checkout( $arr, 1, $uid );
- if ($error) {
- die("${error}\n");
+
+ eval {
+ if (
+ $arr
+ and ( not $entry->{real_arr_ts}
+ or $now->epoch - $entry->{real_arr_ts} < 600 )
+ )
+ {
+ my $status = $self->app->iris->get_departures(
+ station => $arr,
+ lookbehind => 20,
+ lookahead => 220
+ );
+ if ( $status->{errstr} ) {
+ die("get_departures($arr): $status->{errstr}\n");
+ }
+
+ # Note that a train may pass the same station several times.
+ # Notable example: S41 / S42 ("Ringbahn") both starts and
+ # terminates at Berlin Südkreuz
+ my ($train) = List::Util::first {
+ $_->train_id eq $train_id
+ and $_->sched_arrival
+ and $_->sched_arrival->epoch > $entry->{sched_dep_ts}
+ }
+ @{ $status->{results} };
+
+ $train //= List::Util::first { $_->train_id eq $train_id }
+ @{ $status->{results} };
+
+ if ( not $train ) {
+
+ # If we haven't seen the train yet, its arrival is probably
+ # too far in the future. This is not critical.
+ return;
+ }
+
+ my $checked_in = $self->app->in_transit->update_arrival(
+ uid => $uid,
+ train => $train,
+ route => [ $self->app->iris->route_diff($train) ],
+ dep_eva => $dep,
+ arr_eva => $arr,
+ );
+
+ if ( $checked_in and $train->arrival_is_cancelled ) {
+
+ # check out (adds a cancelled journey and resets journey state
+ # to destination selection)
+ $self->app->checkout_p(
+ station => $arr,
+ force => 0,
+ dep_eva => $dep,
+ arr_eva => $arr,
+ uid => $uid
+ )->wait;
+ }
+ else {
+ $self->app->add_route_timestamps(
+ $uid, $train, 0,
+ (
+ defined $entry->{real_arr_ts}
+ and $now->epoch > $entry->{real_arr_ts}
+ ) ? 1 : 0
+ );
+ $self->app->add_wagonorder(
+ uid => $uid,
+ train_id => $train->train_id,
+ is_arrival => 1,
+ eva => $arr,
+ datetime => $train->sched_departure,
+ train_type => $train->type,
+ train_no => $train->train_no
+ );
+ $self->app->add_stationinfo( $uid, 0, $train->train_id,
+ $dep, $arr );
+ }
+ }
+ elsif ( $entry->{real_arr_ts} ) {
+ my ( undef, $error ) = $self->app->checkout_p(
+ station => $arr,
+ force => 2,
+ dep_eva => $dep,
+ arr_eva => $arr,
+ uid => $uid
+ )->catch(
+ sub {
+ my ($error) = @_;
+ $backend_issues += 1;
+ $self->app->log->error(
+ "work($uid) @ IRIS: arrival: $error");
+ $errors += 1;
+ }
+ )->wait;
}
+ };
+ if ($@) {
+ $self->app->log->error("work($uid) @ IRIS: arrival: $@");
+ $errors += 1;
}
- };
- if ($@) {
- $self->app->log->error("work($uid)/arrival: $@");
+
+ eval { };
}
- eval { }
}
- # Computing yearly stats may take a while, but we've got all time in the
- # world here. This means users won't have to wait when loading their
- # own by-year journey log.
- for my $user ( $db->select( 'users', 'id', { status => 1 } )->hashes->each )
- {
- $self->app->get_journey_stats(
- uid => $user->{id},
- year => $now->year
- );
+ my $started_at = $now;
+ my $main_finished_at = DateTime->now( time_zone => 'Europe/Berlin' );
+ my $worker_duration = $main_finished_at->epoch - $started_at->epoch;
+
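+	# Report runtime and error counters using InfluxDB line protocol, e.g.
+	# "worker,backend=iris runtime_seconds=12,errors=0,backend_errors=1,ratelimit_count=0"
+	# (field values are illustrative).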
+ if ( $self->app->config->{influxdb}->{url} ) {
+ my $tags = q{};
+ if ($backend) {
+ $tags .= ",backend=${backend}";
+ }
+ if ( $self->app->mode eq 'development' ) {
+ $self->app->log->debug( 'POST '
+ . $self->app->config->{influxdb}->{url}
+ . " worker${tags} runtime_seconds=${worker_duration},errors=${errors},backend_errors=${backend_issues},ratelimit_count=${rate_limit_counts}"
+ );
+ }
+ else {
+ $self->app->ua->post_p( $self->app->config->{influxdb}->{url},
+"worker${tags} runtime_seconds=${worker_duration},errors=${errors},backend_errors=${backend_issues},ratelimit_count=${rate_limit_counts}"
+ )->wait;
+ }
+ }
+
+ if ( not $self->app->config->{traewelling}->{separate_worker} ) {
+ $self->app->start('traewelling');
}
+
+ # add_wagonorder and add_stationinfo assume a permanently running IOLoop
+ # and do not allow Mojolicious commands to wait until they have completed.
+ # Hence, some add_wagonorder and add_stationinfo calls made here may not
+ # complete before the work command exits, and thus have no effect.
+ #
+ # This is not ideal and will need fixing at some point. Until then, here
+ # is the pragmatic solution for 99% of the associated issues.
+ Mojo::Promise->timer(5)->wait;
}
1;
diff --git a/lib/Travelynx/Command/worker.pm b/lib/Travelynx/Command/worker.pm
index 6b70f2e..be7431f 100644
--- a/lib/Travelynx/Command/worker.pm
+++ b/lib/Travelynx/Command/worker.pm
@@ -1,24 +1,31 @@
package Travelynx::Command::worker;
+
+# Copyright (C) 2020-2023 Birte Kristina Friesel
+#
+# SPDX-License-Identifier: AGPL-3.0-or-later
use Mojo::Base 'Mojolicious::Command';
use Mojo::IOLoop;
-has description =>
- 'travelynx background worker';
+has description => 'travelynx background worker';
has usage => sub { shift->extract_usage };
sub run {
my ($self) = @_;
- Mojo::IOLoop->recurring(180 => sub {
- $self->app->start('work');
- });
+ Mojo::IOLoop->recurring(
+ 180 => sub {
+ $self->app->start('work');
+ }
+ );
- Mojo::IOLoop->recurring(3600 => sub {
- $self->app->start('maintenance');
- });
+ Mojo::IOLoop->recurring(
+ 36000 => sub {
+ $self->app->start('maintenance');
+ }
+ );
- if (not Mojo::IOLoop->is_running) {
+ if ( not Mojo::IOLoop->is_running ) {
Mojo::IOLoop->start;
}
}
@@ -33,4 +40,4 @@ __END__
Background worker for cron-less setups, e.g. Docker.
- Calls "index.pl work" every 3 minutes and "index.pl maintenance" every 1 hour.
+ Calls "index.pl work" every 3 minutes and "index.pl maintenance" every 10 hours.
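+
+  In a cron-less setup the worker can be started from the travelynx
+  directory, e.g.:
+
+    perl index.pl worker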