@@ -90,13 +90,13 @@ type DBOptions struct {
 // TODO :: revisit this logic
 func (d *DBOptions) ValidateSettings() error {
 	read := &settings{}
-	err := mapstructure.WeakDecode(d.ReadSettings, &read)
+	err := mapstructure.Decode(d.ReadSettings, read)
 	if err != nil {
 		return fmt.Errorf("read settings: %w", err)
 	}

 	write := &settings{}
-	err = mapstructure.WeakDecode(d.WriteSettings, &write)
+	err = mapstructure.Decode(d.WriteSettings, write)
 	if err != nil {
 		return fmt.Errorf("write settings: %w", err)
 	}
@@ -146,7 +146,21 @@ func (d *DBOptions) ValidateSettings() error {
 		write.MaxMemory = fmt.Sprintf("%d bytes", int64(bytes)/2)
 	}

-	if read.Threads == 0 && write.Threads == 0 {
+	var readThread, writeThread int
+	if read.Threads != "" {
+		readThread, err = strconv.Atoi(read.Threads)
+		if err != nil {
+			return fmt.Errorf("unable to parse read threads: %w", err)
+		}
+	}
+	if write.Threads != "" {
+		writeThread, err = strconv.Atoi(write.Threads)
+		if err != nil {
+			return fmt.Errorf("unable to parse write threads: %w", err)
+		}
+	}
+
+	if readThread == 0 && writeThread == 0 {
 		connector, err := duckdb.NewConnector("", nil)
 		if err != nil {
 			return fmt.Errorf("unable to create duckdb connector: %w", err)
@@ -162,31 +176,31 @@ func (d *DBOptions) ValidateSettings() error {
 			return fmt.Errorf("unable to get threads: %w", err)
 		}

-		read.Threads = threads / 2
-		write.Threads = threads / 2
+		read.Threads = strconv.Itoa(threads / 2)
+		write.Threads = strconv.Itoa(threads / 2)
 	}

-	if read.Threads == 0 != (write.Threads == 0) {
+	if readThread == 0 != (writeThread == 0) {
 		// only one is defined
 		var threads int
-		if read.Threads != 0 {
-			threads = read.Threads
+		if readThread != 0 {
+			threads = readThread
 		} else {
-			threads = write.Threads
+			threads = writeThread
 		}

-		read.Threads = threads / 2
-		write.Threads = threads / 2
+		read.Threads = strconv.Itoa(threads / 2)
+		write.Threads = strconv.Itoa(threads / 2)
 	}

-	err = mapstructure.Decode(read, &d.ReadSettings)
+	err = mapstructure.WeakDecode(read, &d.ReadSettings)
 	if err != nil {
-		return fmt.Errorf("read settings: %w", err)
+		return fmt.Errorf("failed to update read settings: %w", err)
 	}

-	err = mapstructure.Decode(write, &d.WriteSettings)
+	err = mapstructure.WeakDecode(write, &d.WriteSettings)
 	if err != nil {
-		return fmt.Errorf("write settings: %w", err)
+		return fmt.Errorf("failed to update write settings: %w", err)
 	}
 	return nil
 }
@@ -212,6 +226,7 @@ type InsertTableOptions struct {

 // NewDB creates a new DB instance.
 // This can be a slow operation if the backup is large.
+// dbIdentifier is a unique identifier for the database reported in metrics.
 func NewDB(ctx context.Context, dbIdentifier string, opts *DBOptions) (DB, error) {
 	if dbIdentifier == "" {
 		return nil, fmt.Errorf("db identifier cannot be empty")
@@ -225,13 +240,13 @@ func NewDB(ctx context.Context, dbIdentifier string, opts *DBOptions) (DB, error
 	db := &db{
 		dbIdentifier: dbIdentifier,
 		opts:         opts,
-		readPath:     filepath.Join(opts.LocalPath, dbIdentifier, "read"),
-		writePath:    filepath.Join(opts.LocalPath, dbIdentifier, "write"),
+		readPath:     filepath.Join(opts.LocalPath, "read"),
+		writePath:    filepath.Join(opts.LocalPath, "write"),
 		writeDirty:   true,
 		logger:       opts.Logger,
 	}
 	if opts.BackupProvider != nil {
-		db.backup = blob.PrefixedBucket(opts.BackupProvider.bucket, dbIdentifier)
+		db.backup = opts.BackupProvider.bucket
 	}
 	// create read and write paths
 	err = os.MkdirAll(db.readPath, fs.ModePerm)
@@ -875,44 +890,43 @@ func (d *db) attachDBs(ctx context.Context, db *sqlx.DB, path string, read bool)
 	if err != nil {
 		return err
 	}
+
+	var views []string
 	for _, entry := range entries {
 		if !entry.IsDir() {
 			continue
 		}
 		version, exist, err := tableVersion(path, entry.Name())
 		if err != nil {
 			d.logger.Error("error in fetching db version", slog.String("table", entry.Name()), slog.Any("error", err))
-			_ = os.RemoveAll(path)
+			_ = os.RemoveAll(filepath.Join(path, entry.Name()))
 			continue
 		}
 		if !exist {
-			_ = os.RemoveAll(path)
+			_ = os.RemoveAll(filepath.Join(path, entry.Name()))
 			continue
 		}
-		path := filepath.Join(path, entry.Name(), version)
+		versionPath := filepath.Join(path, entry.Name(), version)

 		// read meta file
-		f, err := os.ReadFile(filepath.Join(path, "meta.json"))
+		f, err := os.ReadFile(filepath.Join(versionPath, "meta.json"))
 		if err != nil {
-			_ = os.RemoveAll(path)
+			_ = os.RemoveAll(versionPath)
 			d.logger.Error("error in reading meta file", slog.String("table", entry.Name()), slog.Any("error", err))
 			// May be keep it as a config to return error or continue ?
 			continue
 		}
 		var meta meta
 		err = json.Unmarshal(f, &meta)
 		if err != nil {
-			_ = os.RemoveAll(path)
+			_ = os.RemoveAll(versionPath)
 			d.logger.Error("error in unmarshalling meta file", slog.String("table", entry.Name()), slog.Any("error", err))
 			continue
 		}

 		if meta.ViewSQL != "" {
 			// table is a view
-			_, err := db.ExecContext(ctx, fmt.Sprintf("CREATE OR REPLACE VIEW %s AS %s", safeSQLName(entry.Name()), meta.ViewSQL))
-			if err != nil {
-				return err
-			}
+			views = append(views, fmt.Sprintf("CREATE OR REPLACE VIEW %s AS (%s\n)", safeSQLName(entry.Name()), meta.ViewSQL))
 			continue
 		}
 		switch BackupFormat(meta.Format) {
@@ -922,10 +936,10 @@ func (d *db) attachDBs(ctx context.Context, db *sqlx.DB, path string, read bool)
 			if read {
 				readMode = " (READ_ONLY)"
 			}
-			_, err := db.ExecContext(ctx, fmt.Sprintf("ATTACH %s AS %s %s", safeSQLString(filepath.Join(path, "data.db")), safeSQLName(dbName), readMode))
+			_, err := db.ExecContext(ctx, fmt.Sprintf("ATTACH %s AS %s %s", safeSQLString(filepath.Join(versionPath, "data.db")), safeSQLName(dbName), readMode))
 			if err != nil {
 				d.logger.Error("error in attaching db", slog.String("table", entry.Name()), slog.Any("error", err))
-				_ = os.RemoveAll(filepath.Join(path))
+				_ = os.RemoveAll(versionPath)
 				continue
 			}

@@ -944,6 +958,13 @@ func (d *db) attachDBs(ctx context.Context, db *sqlx.DB, path string, read bool)
 			return fmt.Errorf("unknown backup format %q", meta.Format)
 		}
 	}
+	// create views after attaching all the DBs since views can depend on other tables
+	for _, view := range views {
+		_, err := db.ExecContext(ctx, view)
+		if err != nil {
+			return err
+		}
+	}
 	return nil
 }

@@ -1091,12 +1112,12 @@ func retry(maxRetries int, delay time.Duration, fn func() error) error {
 }

 func dbName(name string) string {
-	return safeSQLName(fmt.Sprintf("%s__data__db", name))
+	return fmt.Sprintf("%s__data__db", name)
 }

 type settings struct {
 	MaxMemory string `mapstructure:"max_memory"`
-	Threads   int    `mapstructure:"threads"`
+	Threads   string `mapstructure:"threads"`
 	// Can be more settings
 }

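For readers skimming the diff, here is a minimal, hypothetical sketch of the thread-splitting behaviour that the revised `ValidateSettings` introduces: `threads` now arrives as a string, is parsed with `strconv.Atoi`, and when only one of the read/write settings is configured, both handles receive half of that value. The `splitThreads` helper below is illustrative only (it is not part of the package), and it omits the branch where neither side is set and DuckDB is queried for its default thread count.

```go
package main

import (
	"fmt"
	"strconv"
)

// splitThreads is a hypothetical helper mirroring the diff's logic: parse the
// string-typed thread settings, and if exactly one side is configured, give
// each handle half of that value. Both-empty (DuckDB default lookup) and
// both-set cases are left untouched here.
func splitThreads(readSetting, writeSetting string) (read, write string, err error) {
	var readThreads, writeThreads int
	if readSetting != "" {
		if readThreads, err = strconv.Atoi(readSetting); err != nil {
			return "", "", fmt.Errorf("unable to parse read threads: %w", err)
		}
	}
	if writeSetting != "" {
		if writeThreads, err = strconv.Atoi(writeSetting); err != nil {
			return "", "", fmt.Errorf("unable to parse write threads: %w", err)
		}
	}
	if (readThreads == 0) != (writeThreads == 0) { // only one side is configured
		threads := readThreads
		if threads == 0 {
			threads = writeThreads
		}
		return strconv.Itoa(threads / 2), strconv.Itoa(threads / 2), nil
	}
	return readSetting, writeSetting, nil
}

func main() {
	r, w, _ := splitThreads("8", "")
	fmt.Println(r, w) // prints: 4 4
}
```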