@@ -6,6 +6,8 @@
 
 import argparse
 import os
+import re
+
 import numpy as np
 
 from .core import ULog
@@ -50,6 +52,16 @@ def main():
         args.time_s, args.time_e, args.ignore)
 
 
+def read_string_data(data: ULog.Data, field_name: str, array_size: int, data_index: int) -> str:
+    """ Parse a data field as a string """
+    s = ''
+    for index in range(array_size):
+        character = data.data[f'{field_name}[{index}]'][data_index]
+        if character == 0:
+            break
+        s += chr(character)
+    return s
+
 def convert_ulog2csv(ulog_file_name, messages, output, delimiter, time_s, time_e,
                      disable_str_exceptions=False):
     """
@@ -80,17 +92,36 @@ def convert_ulog2csv(ulog_file_name, messages, output, delimiter, time_s, time_e,
         base_name = os.path.basename(output_file_prefix)
         output_file_prefix = os.path.join(output, base_name)
 
+    array_pattern = re.compile(r"(.*)\[(.*?)\]")
+
+    def get_fields(data: ULog.Data) -> tuple[list[str], dict[str, int]]:
+        # use same field order as in the log, except for the timestamp
+        data_keys = []
+        string_array_sizes = {}
+        for f in data.field_data:
+            if f.field_name.startswith('_padding'):
+                continue
+            result = array_pattern.fullmatch(f.field_name)
+            if result and f.type_str == 'char':  # string (array of chars)
+                field, array_index = result.groups()
+                array_index = int(array_index)
+                string_array_sizes[field] = max(array_index + 1, string_array_sizes.get(field, 0))
+                if array_index == 0:
+                    data_keys.append(field)
+            else:
+                data_keys.append(f.field_name)
+        data_keys.remove('timestamp')
+        data_keys.insert(0, 'timestamp')  # we want timestamp at first position
+        return data_keys, string_array_sizes
+
     for d in data:
-        fmt = '{0}_{1}_{2}.csv'
-        output_file_name = fmt.format(output_file_prefix, d.name.replace('/', '_'), d.multi_id)
-        fmt = 'Writing {0} ({1} data points)'
-        # print(fmt.format(output_file_name, len(d.data['timestamp'])))
+        name_without_slash = d.name.replace('/', '_')
+        output_file_name = f'{output_file_prefix}_{name_without_slash}_{d.multi_id}.csv'
+        num_data_points = len(d.data['timestamp'])
+        print(f'Writing {output_file_name} ({num_data_points} data points)')
         with open(output_file_name, 'w', encoding='utf-8') as csvfile:
 
-            # use same field order as in the log, except for the timestamp
-            data_keys = [f.field_name for f in d.field_data]
-            data_keys.remove('timestamp')
-            data_keys.insert(0, 'timestamp')  # we want timestamp at first position
+            data_keys, string_array_sizes = get_fields(d)
 
             # we don't use np.savetxt, because we have multiple arrays with
             # potentially different data types. However the following is quite
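
As a quick standalone check of the field-name parsing above (not part of the diff): array_pattern splits an array element name into its base name and index, and leaves scalar names unmatched:

    import re
    array_pattern = re.compile(r"(.*)\[(.*?)\]")
    print(array_pattern.fullmatch('message[12]').groups())  # ('message', '12')
    print(array_pattern.fullmatch('timestamp'))             # None: scalar field

Since get_fields only collapses matches whose type_str is 'char', numeric array fields such as 'accel[0]' still get one CSV column per element; only char arrays become a single string column.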
@@ -110,7 +141,11 @@ def convert_ulog2csv(ulog_file_name, messages, output, delimiter, time_s, time_e,
             last_elem = len(data_keys)-1
             for i in range(time_s_i, time_e_i):
                 for k in range(len(data_keys)):
-                    csvfile.write(str(d.data[data_keys[k]][i]))
+                    if data_keys[k] in string_array_sizes:  # string
+                        s = read_string_data(d, data_keys[k], string_array_sizes[data_keys[k]], i)
+                        csvfile.write(s)
+                    else:
+                        csvfile.write(str(d.data[data_keys[k]][i]))
                     if k != last_elem:
                         csvfile.write(delimiter)
                 csvfile.write('\n')
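
With this change, char-array fields (for example the text of PX4 mavlink_log messages) come out as one readable string column instead of one numeric column per character. A usage sketch, assuming the module lives at pyulog/ulog2csv.py as in the pyulog repo (file and topic names are illustrative):

    from pyulog.ulog2csv import convert_ulog2csv

    # writes /tmp/log_0_<topic>_<multi_id>.csv for each selected topic;
    # time_s/time_e of None export the full time range
    convert_ulog2csv('log_0.ulg', 'mavlink_log', '/tmp', ',', None, None)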