
Commit cc8992f

Merge remote-tracking branch 'origin/develop' into fb_moveWorkflowCode
2 parents 46160d8 + 8963dc5 commit cc8992f

30 files changed: +527 −337 lines changed

api/src/org/labkey/api/data/DbScope.java

Lines changed: 34 additions & 10 deletions
@@ -26,6 +26,7 @@
 import org.jetbrains.annotations.Nullable;
 import org.junit.Assert;
 import org.junit.Test;
+import org.labkey.api.action.ApiUsageException;
 import org.labkey.api.audit.TransactionAuditProvider;
 import org.labkey.api.cache.Cache;
 import org.labkey.api.data.ConnectionWrapper.Closer;
@@ -2166,8 +2167,7 @@ public static void closeAllConnectionsForCurrentThread()
         }
         try
         {
-            LOG.warn("Forcing close of still-pending transaction object. Current stack is ", new Throwable());
-            LOG.warn("Forcing close of still-pending transaction object started at ", t._creation);
+            LOG.warn("Forcing close of still-pending transaction object started at {}. Current stack is ", t._creation, new Throwable());
             t.close();
         }
         catch (ConnectionAlreadyReleasedException ignored)
@@ -2385,12 +2385,17 @@ public void run(TransactionImpl transaction)
             // Copy to avoid ConcurrentModificationExceptions, need to retain original order from LinkedHashMap
             List<Runnable> tasks = new ArrayList<>(getRunnables(transaction).keySet());

-            for (Runnable task : tasks)
+            try
             {
-                task.run();
+                for (Runnable task : tasks)
+                {
+                    task.run();
+                }
+            }
+            finally
+            {
+                transaction.closeCaches();
             }
-
-            transaction.closeCaches();
         }

         public <T extends Runnable> T add(TransactionImpl transaction, T task)
@@ -2715,16 +2720,17 @@ public void commit()
                 conn.commit();
                 conn.setAutoCommit(true);
                 LOG.debug("setAutoCommit(true)");
-                if (null != _closeOnClose)
-                    try { _closeOnClose.close(); } catch (Exception ignore) {}
             }
             finally
             {
+                if (null != _closeOnClose)
+                    try { _closeOnClose.close(); } catch (Exception ignore) {}
                 if (null != conn)
                     conn.internalClose();
-            }

-            popCurrentTransaction();
+                // Make sure to pop whether we successfully committed or not
+                popCurrentTransaction();
+            }

             CommitTaskOption.POSTCOMMIT.run(this);
         }
@@ -3164,6 +3170,24 @@ public void testAutoCommitFailure()
         closeAllConnectionsForCurrentThread();
     }

+    @Test
+    public void tesCommitTaskFailure()
+    {
+        String message = "Expected failure";
+        try (Transaction t = getLabKeyScope().ensureTransaction())
+        {
+            t.addCommitTask(() -> { throw new ApiUsageException("Expected failure"); }, CommitTaskOption.PRECOMMIT);
+            t.commit();
+            fail("Shouldn't have gotten here, expected ApiUsageException");
+        }
+        catch (ApiUsageException e)
+        {
+            assertEquals("Bad message", message, e.getMessage());
+        }
+        assertFalse(getLabKeyScope().isTransactionActive());
+        closeAllConnectionsForCurrentThread();
+    }
+
     @Test
     public void testLockReleasedException()
     {
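Editor's note: the new tesCommitTaskFailure test exercises the hardened commit() path, where an exception thrown by a PRECOMMIT task now propagates out of commit() while the transaction is still popped and its connection released. A minimal caller-side sketch of the same pattern follows; the scope lookup and the validation helper are illustrative assumptions, not code from this commit.

    // Sketch only: a pre-commit check whose failure aborts the commit but still cleans up
    DbScope scope = DbScope.getLabKeyScope();
    try (DbScope.Transaction tx = scope.ensureTransaction())
    {
        tx.addCommitTask(() -> {
            if (!rowsLookValid())   // hypothetical validation helper
                throw new ApiUsageException("Expected failure");
        }, DbScope.CommitTaskOption.PRECOMMIT);

        // ... transactional work ...

        tx.commit();   // PRECOMMIT tasks run here; a failure surfaces to the caller, and the transaction is still popped
    }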

api/src/org/labkey/api/data/JsonWriter.java

Lines changed: 3 additions & 1 deletion
@@ -180,7 +180,9 @@ public static Map<String, Object> getMetaData(DisplayColumn dc, FieldKey fieldKe

         props.put("wrappedColumnName", cinfo == null ? null : cinfo.getWrappedColumnName());
         props.put("valueExpression", cinfo == null ? null : cinfo.getValueExpression());
-        props.put("displayWidth", cinfo == null ? null : cinfo.getDisplayWidth());
+
+        if (cinfo != null && cinfo.getDisplayWidth() != null && !cinfo.getDisplayWidth().isEmpty())
+            props.put("displayWidth", cinfo.getDisplayWidth());

         ColumnInfo displayField = dc.getDisplayColumnInfo();
         if (displayField != null && displayField != cinfo)

api/src/org/labkey/api/data/SimpleFilter.java

Lines changed: 12 additions & 1 deletion
@@ -664,11 +664,19 @@ public static DatabaseIdentifier getAliasForColumnFilter(SqlDialect dialect, Col

     public static class InClause extends MultiValuedFilterClause
     {
+        private InClauseGenerator _tempTableGenerator = null;
+
         public InClause(FieldKey fieldKey, Collection<?> params)
         {
             this(fieldKey, params, false, false);
         }

+        public InClause(FieldKey fieldKey, Collection<?> params, InClauseGenerator tempTableGenerator)
+        {
+            this(fieldKey, params, false, false);
+            _tempTableGenerator = tempTableGenerator;
+        }
+
         public InClause(FieldKey fieldKey, Collection<?> params, boolean urlClause)
         {
             this(fieldKey, params, urlClause, false);
@@ -837,7 +845,10 @@ public SQLFragment toSQLFragment(Map<FieldKey, ? extends ColumnInfo> columnMap,
             in.appendIdentifier(alias);

             // Dialect may want to generate database-specific SQL, especially for very large IN clauses
-            dialect.appendInClauseSql(in, convertedParams);
+            if (null == _tempTableGenerator)
+                dialect.appendInClauseSql(in, convertedParams);
+            else
+                dialect.appendInClauseSqlWithCustomInClauseGenerator(in, convertedParams, _tempTableGenerator);

             if (isIncludeNull())
             {
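Editor's note: a sketch of how the new InClause constructor might be wired up when a filter will run against an external scope. The field key, id collection, and generator lookup are illustrative assumptions, not code from this commit.

    // Illustrative only: route a potentially very large IN clause through a caller-supplied generator
    InClauseGenerator generator = getTempTableInClauseGenerator(externalScope);   // hypothetical factory, mirroring DefaultMigrationSchemaHandler
    SimpleFilter filter = new SimpleFilter();
    filter.addClause(new SimpleFilter.InClause(FieldKey.fromParts("EntityId"), entityIds, generator));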

api/src/org/labkey/api/data/dialect/BasePostgreSqlDialect.java

Lines changed: 2 additions & 2 deletions
@@ -292,11 +292,11 @@ public String addReselect(SQLFragment sql, ColumnInfo column, @Nullable String p
     @Override
     public SQLFragment appendInClauseSql(SQLFragment sql, @NotNull Collection<?> params)
     {
-        return appendInClauseSql(sql, params, _tempTableInClauseGenerator);
+        return appendInClauseSqlWithCustomInClauseGenerator(sql, params, _tempTableInClauseGenerator);
     }

     @Override
-    public SQLFragment appendInClauseSql(SQLFragment sql, @NotNull Collection<?> params, InClauseGenerator tempTableGenerator)
+    public SQLFragment appendInClauseSqlWithCustomInClauseGenerator(SQLFragment sql, @NotNull Collection<?> params, InClauseGenerator tempTableGenerator)
     {
         if (params.size() >= TEMPTABLE_GENERATOR_MINSIZE)
         {

api/src/org/labkey/api/data/dialect/SqlDialect.java

Lines changed: 3 additions & 3 deletions
@@ -530,11 +530,11 @@ protected Set<String> getJdbcKeywords(SqlExecutor executor) throws SQLException,
     // Most callers should use this method
     public SQLFragment appendInClauseSql(SQLFragment sql, @NotNull Collection<?> params)
     {
-        return appendInClauseSql(sql, params, null);
+        return appendInClauseSqlWithCustomInClauseGenerator(sql, params, null);
     }

-    // Use in cases where the default temp schema won't do, e.g., you need to apply a large IN clause in an external data source
-    public SQLFragment appendInClauseSql(SQLFragment sql, @NotNull Collection<?> params, InClauseGenerator tempTableGenerator)
+    // Use only in cases where the default temp-table generator won't do, e.g., you need to apply a large IN clause in an external data source
+    public SQLFragment appendInClauseSqlWithCustomInClauseGenerator(SQLFragment sql, @NotNull Collection<?> params, InClauseGenerator tempTableGenerator)
     {
         return DEFAULT_GENERATOR.appendInClauseSql(sql, params);
     }
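Editor's note: as the updated comments indicate, appendInClauseSql stays the entry point for most callers, while the renamed appendInClauseSqlWithCustomInClauseGenerator covers cases like a large IN clause against an external data source. A rough usage sketch follows; the SQL text, id collection, and generator are all assumed for illustration.

    SQLFragment sql = new SQLFragment("SELECT Parent FROM core.Documents WHERE Parent ");   // illustrative SQL

    // Common case: let the dialect pick its default IN-clause rendering
    scope.getSqlDialect().appendInClauseSql(sql, entityIds);

    // External-source alternative (use instead of the call above, not in addition to it):
    // hand in a generator whose temp tables live in that external scope
    externalScope.getSqlDialect().appendInClauseSqlWithCustomInClauseGenerator(sql, entityIds, externalScopeGenerator);   // externalScopeGenerator is an assumed InClauseGenerator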

api/src/org/labkey/api/migration/DatabaseMigrationConfiguration.java

Lines changed: 1 addition & 1 deletion
@@ -19,7 +19,7 @@ default void beforeMigration(){}
     DbScope getTargetScope();
     @NotNull Set<String> getSkipSchemas();
     Predicate<String> getColumnNameFilter();
-    @Nullable TableSelector getTableSelector(DbSchemaType schemaType, TableInfo sourceTable, TableInfo targetTable, Set<String> selectColumnNames, MigrationSchemaHandler schemaHandler);
+    @Nullable TableSelector getTableSelector(DbSchemaType schemaType, TableInfo sourceTable, TableInfo targetTable, Set<String> selectColumnNames, MigrationSchemaHandler schemaHandler, @Nullable MigrationTableHandler tableHandler);
     default void copyAttachments(DbSchema sourceSchema, DbSchema targetSchema, MigrationSchemaHandler schemaHandler){}
     default void afterMigration(){}
 }

api/src/org/labkey/api/migration/DatabaseMigrationService.java

Lines changed: 1 addition & 0 deletions
@@ -48,6 +48,7 @@ default void migrate(DatabaseMigrationConfiguration configuration)

     // By default, no-op implementations
     default void registerSchemaHandler(MigrationSchemaHandler schemaHandler) {}
+    default void registerTableHandler(MigrationTableHandler tableHandler) {}
     default void registerMigrationFilter(MigrationFilter filter) {}

     default @Nullable MigrationFilter getMigrationFilter(String propertyName)

api/src/org/labkey/api/migration/DefaultDatabaseMigrationConfiguration.java

Lines changed: 1 addition & 1 deletion
@@ -43,7 +43,7 @@ public Predicate<String> getColumnNameFilter()
     }

     @Override
-    public TableSelector getTableSelector(DbSchemaType schemaType, TableInfo sourceTable, TableInfo targetTable, Set<String> selectColumnNames, MigrationSchemaHandler schemaHandler)
+    public TableSelector getTableSelector(DbSchemaType schemaType, TableInfo sourceTable, TableInfo targetTable, Set<String> selectColumnNames, MigrationSchemaHandler schemaHandler, @Nullable MigrationTableHandler tableHandler)
     {
         return null;
     }

api/src/org/labkey/api/migration/DefaultMigrationSchemaHandler.java

Lines changed: 32 additions & 11 deletions
@@ -1,5 +1,6 @@
 package org.labkey.api.migration;

+import org.apache.logging.log4j.Logger;
 import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
 import org.labkey.api.attachments.AttachmentService;
@@ -27,8 +28,11 @@
 import org.labkey.api.query.FieldKey;
 import org.labkey.api.query.SchemaKey;
 import org.labkey.api.query.TableSorter;
+import org.labkey.api.util.ConfigurationException;
 import org.labkey.api.util.GUID;
+import org.labkey.api.util.JobRunner;
 import org.labkey.api.util.StringUtilsLabKey;
+import org.labkey.api.util.logging.LogHelper;

 import java.util.ArrayList;
 import java.util.Arrays;
@@ -38,10 +42,13 @@
 import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;

 public class DefaultMigrationSchemaHandler implements MigrationSchemaHandler
 {
+    private static final Logger LOG = LogHelper.getLogger(DefaultMigrationSchemaHandler.class, "Migration shutdown status");
+
     private final DbSchema _schema;

     public DefaultMigrationSchemaHandler(DbSchema schema)
@@ -77,7 +84,7 @@ public List<TableInfo> getTablesToCopy()

         if (!allTables.isEmpty())
         {
-            DatabaseMigrationService.LOG.info("These tables were removed by TableSorter: {}", allTables);
+            LOG.info("These tables were removed by TableSorter: {}", allTables);
         }

         return sortedTables.stream()
@@ -250,12 +257,13 @@ public void copyAttachments(DatabaseMigrationConfiguration configuration, DbSche
             Collection<String> entityIds = new SqlSelector(targetSchema, sql).getCollection(String.class);
             SQLFragment selectParents = new SQLFragment("Parent");
             // This query against the source database is likely to contain a large IN clause, so use an alternative InClauseGenerator
-            sourceSchema.getSqlDialect().appendInClauseSql(selectParents, entityIds, getTempTableInClauseGenerator(sourceSchema.getScope()));
+            sourceSchema.getSqlDialect().appendInClauseSqlWithCustomInClauseGenerator(selectParents, entityIds, getTempTableInClauseGenerator(sourceSchema.getScope()));
             copyAttachments(configuration, sourceSchema, new SQLClause(selectParents), type);
         }
-
-        // TODO: fail if type.getSelectParentEntityIdsSql() returns null?
-        // TODO: throw if some registered AttachmentType is not seen
+        else
+        {
+            throw new ConfigurationException("AttachmentType \"" + type.getUniqueName() + "\" is not configured to find parent EntityIds!");
+        }
     });
 }
@@ -267,33 +275,46 @@ protected InClauseGenerator getTempTableInClauseGenerator(DbScope sourceScope)
     }

     private static final Set<AttachmentType> SEEN = new HashSet<>();
+    private static final JobRunner ATTACHMENT_JOB_RUNNER = new JobRunner("Attachment JobRunner", 1);

     // Copy all core.Documents rows that match the provided filter clause
-    protected void copyAttachments(DatabaseMigrationConfiguration configuration, DbSchema sourceSchema, FilterClause filterClause, AttachmentType... type)
+    protected final void copyAttachments(DatabaseMigrationConfiguration configuration, DbSchema sourceSchema, FilterClause filterClause, AttachmentType... type)
     {
         SEEN.addAll(Arrays.asList(type));
         String additionalMessage = " associated with " + Arrays.stream(type).map(t -> t.getClass().getSimpleName()).collect(Collectors.joining(", "));
         TableInfo sourceDocumentsTable = sourceSchema.getScope().getSchema("core", DbSchemaType.Migration).getTable("Documents");
         TableInfo targetDocumentsTable = CoreSchema.getInstance().getTableInfoDocuments();
-        DatabaseMigrationService.get().copySourceTableToTargetTable(configuration, sourceDocumentsTable, targetDocumentsTable, DbSchemaType.Module, false, additionalMessage, new DefaultMigrationSchemaHandler(CoreSchema.getInstance().getSchema())
+
+        // Queue up the core.Documents transfers and let them run in the background
+        ATTACHMENT_JOB_RUNNER.execute(() -> DatabaseMigrationService.get().copySourceTableToTargetTable(configuration, sourceDocumentsTable, targetDocumentsTable, DbSchemaType.Module, false, additionalMessage, new DefaultMigrationSchemaHandler(CoreSchema.getInstance().getSchema())
         {
             @Override
             public FilterClause getTableFilterClause(TableInfo sourceTable, Set<GUID> containers)
             {
                 return filterClause;
             }
-        });
+        }));
     }

-    public static void logUnseenAttachmentTypes()
+    // Global (not schema- or configuration-specific) cleanup
+    public static void afterMigration() throws InterruptedException
     {
+        // Report any unseen attachment types
         Set<AttachmentType> unseen = new HashSet<>(AttachmentService.get().getAttachmentTypes());
         unseen.removeAll(SEEN);

         if (unseen.isEmpty())
-            DatabaseMigrationService.LOG.info("All AttachmentTypes have been seen");
+            LOG.info("All AttachmentTypes have been seen");
+        else
+            throw new ConfigurationException("These AttachmentTypes have not been seen: " + unseen.stream().map(type -> type.getClass().getSimpleName()).collect(Collectors.joining(", ")));
+
+        // Shut down the attachment JobRunner
+        LOG.info("Waiting for core.Documents background transfer to complete");
+        ATTACHMENT_JOB_RUNNER.shutdown();
+        if (ATTACHMENT_JOB_RUNNER.awaitTermination(1, TimeUnit.HOURS))
+            LOG.info("core.Documents background transfer is complete");
         else
-            DatabaseMigrationService.LOG.info("These AttachmentTypes have not been seen: {}", unseen.stream().map(type -> type.getClass().getSimpleName()).collect(Collectors.joining(", ")));
+            LOG.error("core.Documents background transfer did not complete after one hour! Giving up.");
     }

     @Override

api/src/org/labkey/api/migration/MigrationTableHandler.java

Lines changed: 18 additions & 0 deletions
@@ -0,0 +1,18 @@
+package org.labkey.api.migration;
+
+import org.labkey.api.data.SimpleFilter;
+import org.labkey.api.data.TableInfo;
+import org.labkey.api.util.GUID;
+
+import java.util.Set;
+
+/**
+ * Rarely needed, this interface lets a module filter the rows of another module's table. The specific use case: LabBook
+ * needs to filter the compliance.SignedSnapshots table of snapshots associated with Notebooks that are excluded by a
+ * NotebookFilter.
+ */
+public interface MigrationTableHandler
+{
+    TableInfo getTableInfo();
+    void adjustFilter(TableInfo sourceTable, SimpleFilter filter, Set<GUID> containers);
+}
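Editor's note: to make the new contract concrete, a hypothetical implementation sketch follows. The package, schema, table, column names, and helper are placeholders rather than code from this commit.

    package org.labkey.labbook;   // hypothetical module package

    import org.labkey.api.data.DbSchema;
    import org.labkey.api.data.DbSchemaType;
    import org.labkey.api.data.SimpleFilter;
    import org.labkey.api.data.TableInfo;
    import org.labkey.api.migration.MigrationTableHandler;
    import org.labkey.api.query.FieldKey;
    import org.labkey.api.util.GUID;

    import java.util.Collection;
    import java.util.List;
    import java.util.Set;

    public class SignedSnapshotTableHandler implements MigrationTableHandler
    {
        @Override
        public TableInfo getTableInfo()
        {
            // Placeholder lookup for the other module's table whose migrated rows need filtering
            return DbSchema.get("compliance", DbSchemaType.Module).getTable("SignedSnapshots");
        }

        @Override
        public void adjustFilter(TableInfo sourceTable, SimpleFilter filter, Set<GUID> containers)
        {
            // Hypothetical: restrict migrated rows to notebooks that survive the module's NotebookFilter
            filter.addInClause(FieldKey.fromParts("NotebookId"), getIncludedNotebookIds(containers));
        }

        private Collection<Integer> getIncludedNotebookIds(Set<GUID> containers)
        {
            return List.of();   // placeholder; a real handler would consult its NotebookFilter
        }
    }

A handler like this would presumably be registered once at module startup via DatabaseMigrationService.get().registerTableHandler(new SignedSnapshotTableHandler()), the no-op default added to DatabaseMigrationService in this commit.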
