Rewrite parser: use proximity clustering instead of speed-based detection
This commit is contained in:
@@ -2,13 +2,17 @@ from odoo import models, fields, api, _
|
||||
from odoo.exceptions import UserError
|
||||
import json
|
||||
import base64
|
||||
from datetime import datetime
|
||||
import bisect
|
||||
from datetime import datetime, timedelta
|
||||
from math import radians, sin, cos, sqrt, atan2
|
||||
from collections import Counter
|
||||
|
||||
STILL_ACTIVITIES = {'STILL', 'UNKNOWN', 'TILTING', 'EXITING_VEHICLE'}
|
||||
VEHICLE_ACTIVITIES = {'IN_VEHICLE', 'IN_ROAD_VEHICLE', 'IN_RAIL_VEHICLE', 'IN_TWO_WHEELER_VEHICLE'}
|
||||
WALKING_ACTIVITIES = {'WALKING', 'ON_FOOT', 'RUNNING', 'ON_BICYCLE'}
|
||||
|
||||
# Positions within this distance (meters) are considered the same location
|
||||
PROXIMITY_METERS = 200
|
||||
|
||||
|
||||
def _haversine_miles(lat1, lon1, lat2, lon2):
|
||||
R = 3958.8
|
||||
@@ -18,6 +22,10 @@ def _haversine_miles(lat1, lon1, lat2, lon2):
|
||||
return R * 2 * atan2(sqrt(a), sqrt(1 - a))
|
||||
|
||||
|
||||
def _distance_meters(lat1, lon1, lat2, lon2):
    """Great-circle distance between two lat/lng points, in meters.

    Thin wrapper over :func:`_haversine_miles`; 1 mile = 1609.34 m.
    """
    miles = _haversine_miles(lat1, lon1, lat2, lon2)
    return miles * 1609.34
|
||||
|
||||
|
||||
def _get_travel_mode(activity_type):
|
||||
if activity_type in VEHICLE_ACTIVITIES:
|
||||
return 'driving'
|
||||
@@ -26,6 +34,15 @@ def _get_travel_mode(activity_type):
|
||||
return 'unknown'
|
||||
|
||||
|
||||
def _dominant_travel_mode(activities, start_ts, end_ts):
|
||||
"""Get dominant travel mode from activity records between two timestamps."""
|
||||
window = [a for a in activities if start_ts <= a['ts'] <= end_ts]
|
||||
if not window:
|
||||
return 'unknown'
|
||||
counts = Counter(a['type'] for a in window)
|
||||
return _get_travel_mode(counts.most_common(1)[0][0])
|
||||
|
||||
|
||||
class WtImportTimelineWizard(models.TransientModel):
|
||||
_name = 'wt.import.timeline.wizard'
|
||||
_description = 'Import Google Timeline'
|
||||
@@ -37,11 +54,15 @@ class WtImportTimelineWizard(models.TransientModel):
|
||||
default=5,
|
||||
help='Ignore stops shorter than this duration'
|
||||
)
|
||||
proximity_meters = fields.Integer(
|
||||
string='Location Proximity (meters)',
|
||||
default=200,
|
||||
help='GPS positions within this distance are grouped as the same location'
|
||||
)
|
||||
geocode = fields.Boolean(
|
||||
string='Resolve Addresses via OpenStreetMap',
|
||||
default=True,
|
||||
)
|
||||
result_message = fields.Char(string='Result', readonly=True)
|
||||
|
||||
def action_import(self):
|
||||
self.ensure_one()
|
||||
@@ -51,7 +72,7 @@ class WtImportTimelineWizard(models.TransientModel):
|
||||
except Exception as e:
|
||||
raise UserError(_('Invalid JSON file: %s') % str(e))
|
||||
|
||||
stops = self._parse_timeline(data)
|
||||
stops = self._parse_timeline(data, self.proximity_meters)
|
||||
if not stops:
|
||||
raise UserError(_('No location stops found in the uploaded file.'))
|
||||
|
||||
@@ -78,12 +99,12 @@ class WtImportTimelineWizard(models.TransientModel):
|
||||
stop['distance_from_previous'] = 0.0
|
||||
stop['travel_time_from_previous'] = 0.0
|
||||
|
||||
# Get existing arrived_at timestamps to skip duplicates
|
||||
# Get existing arrived_at timestamps to avoid duplicates
|
||||
LocationLog = self.env['wt.location.log']
|
||||
existing = set(
|
||||
LocationLog.search([]).mapped(
|
||||
lambda r: r.arrived_at.strftime('%Y-%m-%d %H:%M:%S') if r.arrived_at else ''
|
||||
)
|
||||
r.arrived_at.strftime('%Y-%m-%d %H:%M:%S')
|
||||
for r in LocationLog.search([])
|
||||
if r.arrived_at
|
||||
)
|
||||
|
||||
created_ids = []
|
||||
@@ -93,7 +114,6 @@ class WtImportTimelineWizard(models.TransientModel):
|
||||
departed = stop['departed_at'].replace(tzinfo=None)
|
||||
arrived_str = arrived.strftime('%Y-%m-%d %H:%M:%S')
|
||||
|
||||
# Skip if already imported
|
||||
if arrived_str in existing:
|
||||
skipped += 1
|
||||
continue
|
||||
@@ -128,7 +148,15 @@ class WtImportTimelineWizard(models.TransientModel):
|
||||
'target': 'current',
|
||||
}
|
||||
|
||||
def _parse_timeline(self, data):
|
||||
def _parse_timeline(self, data, proximity_meters=200):
|
||||
"""
|
||||
Parse Google Timeline Edits JSON into location stops.
|
||||
|
||||
Google records positions primarily when the device is stationary.
|
||||
We cluster consecutive positions within proximity_meters into a single stop.
|
||||
The gap between clusters = travel time.
|
||||
Activity records between clusters determine the travel mode.
|
||||
"""
|
||||
positions = []
|
||||
activities = []
|
||||
|
||||
@@ -140,11 +168,10 @@ class WtImportTimelineWizard(models.TransientModel):
|
||||
point = pos.get('point', {})
|
||||
lat = point.get('latE7', 0) / 1e7
|
||||
lng = point.get('lngE7', 0) / 1e7
|
||||
speed = pos.get('speedMetersPerSecond') or 0.0
|
||||
ts_str = pos.get('timestamp', '')
|
||||
if ts_str and lat and lng:
|
||||
ts = datetime.fromisoformat(ts_str.replace('Z', '+00:00'))
|
||||
positions.append({'ts': ts, 'lat': lat, 'lng': lng, 'speed': speed})
|
||||
positions.append({'ts': ts, 'lat': lat, 'lng': lng})
|
||||
|
||||
elif 'activityRecord' in raw:
|
||||
ar = raw['activityRecord']
|
||||
@@ -161,45 +188,58 @@ class WtImportTimelineWizard(models.TransientModel):
|
||||
positions.sort(key=lambda x: x['ts'])
|
||||
activities.sort(key=lambda x: x['ts'])
|
||||
|
||||
def get_activity_at(ts):
|
||||
if not activities:
|
||||
return 'UNKNOWN'
|
||||
nearest = min(activities, key=lambda a: abs((a['ts'] - ts).total_seconds()))
|
||||
return nearest['type']
|
||||
|
||||
# Cluster positions by proximity
|
||||
stops = []
|
||||
current_stop = []
|
||||
last_travel_mode = 'unknown'
|
||||
current_cluster = [positions[0]]
|
||||
|
||||
for pos in positions:
|
||||
activity = get_activity_at(pos['ts'])
|
||||
is_still = activity in STILL_ACTIVITIES or pos['speed'] < 0.5
|
||||
for pos in positions[1:]:
|
||||
prev = current_cluster[-1]
|
||||
dist = _distance_meters(prev['lat'], prev['lng'], pos['lat'], pos['lng'])
|
||||
|
||||
if is_still:
|
||||
current_stop.append(pos)
|
||||
if dist <= proximity_meters:
|
||||
# Same location — extend current cluster
|
||||
current_cluster.append(pos)
|
||||
else:
|
||||
last_travel_mode = _get_travel_mode(activity)
|
||||
if len(current_stop) >= 2:
|
||||
avg_lat = sum(p['lat'] for p in current_stop) / len(current_stop)
|
||||
avg_lng = sum(p['lng'] for p in current_stop) / len(current_stop)
|
||||
stops.append({
|
||||
'arrived_at': current_stop[0]['ts'],
|
||||
'departed_at': current_stop[-1]['ts'],
|
||||
'lat': avg_lat,
|
||||
'lng': avg_lng,
|
||||
'travel_mode': last_travel_mode,
|
||||
})
|
||||
current_stop = []
|
||||
# New location — save current cluster as a stop
|
||||
avg_lat = sum(p['lat'] for p in current_cluster) / len(current_cluster)
|
||||
avg_lng = sum(p['lng'] for p in current_cluster) / len(current_cluster)
|
||||
|
||||
if len(current_stop) >= 2:
|
||||
avg_lat = sum(p['lat'] for p in current_stop) / len(current_stop)
|
||||
avg_lng = sum(p['lng'] for p in current_stop) / len(current_stop)
|
||||
# Departed = last position in cluster
|
||||
# Next arrived = first position in new cluster
|
||||
# Travel mode = dominant activity between the two
|
||||
travel_mode = _dominant_travel_mode(
|
||||
activities,
|
||||
current_cluster[-1]['ts'],
|
||||
pos['ts']
|
||||
)
|
||||
|
||||
stops.append({
|
||||
'arrived_at': current_cluster[0]['ts'],
|
||||
'departed_at': current_cluster[-1]['ts'],
|
||||
'lat': avg_lat,
|
||||
'lng': avg_lng,
|
||||
'travel_mode': travel_mode,
|
||||
})
|
||||
current_cluster = [pos]
|
||||
|
||||
# Handle last cluster
|
||||
if current_cluster:
|
||||
avg_lat = sum(p['lat'] for p in current_cluster) / len(current_cluster)
|
||||
avg_lng = sum(p['lng'] for p in current_cluster) / len(current_cluster)
|
||||
stops.append({
|
||||
'arrived_at': current_stop[0]['ts'],
|
||||
'departed_at': current_stop[-1]['ts'],
|
||||
'arrived_at': current_cluster[0]['ts'],
|
||||
'departed_at': current_cluster[-1]['ts'],
|
||||
'lat': avg_lat,
|
||||
'lng': avg_lng,
|
||||
'travel_mode': last_travel_mode,
|
||||
'travel_mode': 'unknown',
|
||||
})
|
||||
|
||||
# For single-position stops (arrived == departed), estimate duration
|
||||
# using half the gap to the next stop
|
||||
for i, stop in enumerate(stops):
|
||||
if stop['arrived_at'] == stop['departed_at']:
|
||||
if i + 1 < len(stops):
|
||||
gap = (stops[i + 1]['arrived_at'] - stop['arrived_at']).total_seconds()
|
||||
stop['departed_at'] = stop['arrived_at'] + timedelta(seconds=gap / 2)
|
||||
|
||||
return stops
|
||||
|
||||
Reference in New Issue
Block a user