htran1 commented on a change in pull request #2722: GOBBLIN-865: Add feature that enables PK-chunking in partition
URL: https://github.com/apache/incubator-gobblin/pull/2722#discussion_r353909717
 
 

 ##########
 File path: gobblin-salesforce/src/main/java/org/apache/gobblin/salesforce/SalesforceSource.java
 ##########
 @@ -146,12 +153,120 @@ protected void addLineageSourceInfo(SourceState sourceState, SourceEntity entity
 
   @Override
  protected List<WorkUnit> generateWorkUnits(SourceEntity sourceEntity, SourceState state, long previousWatermark) {
+    log.info("====sfdc connector with pkchk===="); // TODO: remove this after merge back to OS.
+    List<WorkUnit> workUnits = null;
+    String partitionType = state.getProp(SALESFORCE_PARTITION_TYPE, "");
+    if (partitionType.equals("PK_CHUNKING")) {
+      // pk-chunking only supports a start time set via source.querybased.start.value; it does not support an end time.
+      // It always ingests data later than or equal to source.querybased.start.value.
+      // We should only generate pk-chunking based work units in the case of a snapshot/full ingestion.
+      workUnits = generateWorkUnitsPkChunking(sourceEntity, state, previousWatermark);
+    } else {
+      workUnits = generateWorkUnitsStrategy(sourceEntity, state, previousWatermark);
+    }
+    log.info("====Generated {} workUnit(s)====", workUnits.size());
+    return workUnits;
+  }
+
+  /**
+   * Generate work units for pk chunking.
+   */
+  private List<WorkUnit> generateWorkUnitsPkChunking(SourceEntity sourceEntity, SourceState state, long previousWatermark) {
+    JobIdAndBatchIdResultIdList jobIdAndBatchIdResultIdList = executeQueryWithPkChunking(state, previousWatermark);
+    return createWorkUnits(sourceEntity, state, jobIdAndBatchIdResultIdList);
+  }
+
+  private JobIdAndBatchIdResultIdList executeQueryWithPkChunking(
+      SourceState sourceState,
+      long previousWatermark
+  ) throws RuntimeException {
+    State state = new State(sourceState);
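+    // The empty WorkUnit/WorkUnitState below is only a placeholder so an extractor can be
+    // instantiated here: the pk-chunking query is issued at work-unit generation time,
+    // before any real work unit runs.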
+    WorkUnit workUnit = WorkUnit.createEmpty();
+    WorkUnitState workUnitState = new WorkUnitState(workUnit, state);
+    workUnitState.setId("Execute pk-chunking");
+    try {
+      SalesforceExtractor salesforceExtractor = (SalesforceExtractor) this.getExtractor(workUnitState);
+      Partitioner partitioner = new Partitioner(sourceState);
+      if (isEarlyStopEnabled(state) && partitioner.isFullDump()) {
+        throw new UnsupportedOperationException("Early stop mode cannot work with full dump mode.");
+      }
+      Partition partition = partitioner.getGlobalPartition(previousWatermark);
+      String condition = "";
+      Date startDate = Utils.toDate(partition.getLowWatermark(), Partitioner.WATERMARKTIMEFORMAT);
+      String field = sourceState.getProp(ConfigurationKeys.EXTRACT_DELTA_FIELDS_KEY);
+      // pk-chunking only supports a start time set via source.querybased.start.value; it does not support an end time.
+      // It always ingests data later than or equal to source.querybased.start.value.
+      // We should only generate pk-chunking based work units in the case of a snapshot/full ingestion.
+      if (startDate != null && field != null) {
+        String lowWatermarkDate = Utils.dateToString(startDate, SalesforceExtractor.SALESFORCE_TIMESTAMP_FORMAT);
+        condition = field + " >= " + lowWatermarkDate;
+      }
+      Predicate predicate = new Predicate(null, 0, condition, "", null);
+      List<Predicate> predicateList = Arrays.asList(predicate);
+      String entity = sourceState.getProp(ConfigurationKeys.SOURCE_ENTITY);
+
+      if (state.contains(PK_CHUNKING_TEST_JOB_ID)) {
+        String jobId = state.getProp(PK_CHUNKING_TEST_JOB_ID, "");
+        log.info("---Skip query, fetching result files directly for 
[jobId={}]", jobId);
+        String batchIdListStr = state.getProp(PK_CHUNKING_TEST_BATCH_ID_LIST);
+        return salesforceExtractor.getQueryResultIdsPkChunkingFetchOnly(jobId, batchIdListStr);
+      } else {
+        log.info("---Pk Chunking query submit.");
+        return salesforceExtractor.getQueryResultIdsPkChunking(entity, predicateList);
+      }
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  /**
+   * Create work units from a bulk job id.
+   * The work units won't contain a query in this case. Instead, they will contain the bulk job id
+   * and a list of `batchId:resultId` pairs, so all the extractor has to do is fetch the result-set files.
+   */
+  private List<WorkUnit> createWorkUnits(
+      SourceEntity sourceEntity,
+      SourceState state,
+      JobIdAndBatchIdResultIdList jobIdAndBatchIdResultIdList
+  ) {
+    String nameSpaceName = state.getProp(ConfigurationKeys.EXTRACT_NAMESPACE_NAME_KEY);
+    Extract.TableType tableType = Extract.TableType.valueOf(state.getProp(ConfigurationKeys.EXTRACT_TABLE_TYPE_KEY).toUpperCase());
+    String outputTableName = sourceEntity.getDestTableName();
+    Extract extract = createExtract(tableType, nameSpaceName, outputTableName);
+
+    List<WorkUnit> workUnits = Lists.newArrayList();
+    int partitionNumber = state.getPropAsInt(SOURCE_MAX_NUMBER_OF_PARTITIONS, 1);
+    List<BatchIdAndResultId> batchResultIds = jobIdAndBatchIdResultIdList.getBatchIdAndResultIdList();
+    int total = batchResultIds.size();
+    // the size of every partition should be Math.ceil(total / partitionNumber); equivalently, in integer arithmetic: (total + partitionNumber - 1) / partitionNumber
+    int sizeOfPartition = (total + partitionNumber - 1) / partitionNumber;
+    List<List<BatchIdAndResultId>> partitionedResultIds = Lists.partition(batchResultIds, sizeOfPartition);
+    log.info("----partition strategy: max-parti={}, size={}, actual-parti={}, 
total={}", partitionNumber, sizeOfPartition, partitionedResultIds.size(), 
total);
+    for (List<BatchIdAndResultId> resultIds : partitionedResultIds) {
 
 Review comment:
   Can you add blank lines between the blocks?
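
   For readers following along, a minimal job-config sketch of how the PK_CHUNKING branch above would be triggered. The salesforce.partition.type key name is an assumption inferred from the SALESFORCE_PARTITION_TYPE constant in the diff, not confirmed against the merged code; the source.* keys are standard Gobblin query-based source properties.

       # Hypothetical job properties; the salesforce.partition.type key name is
       # inferred from the diff's constants, not confirmed against the merged code.
       source.entity=Account
       extract.delta.fields=SystemModstamp
       # pk-chunking honors only a start time; there is no end-time support
       source.querybased.start.value=20190101000000
       salesforce.partition.type=PK_CHUNKING
       source.max.number.of.partitions=4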

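   As a side note on the partition-sizing math in createWorkUnits, here is a self-contained sketch of the ceiling-division strategy, with plain strings standing in for BatchIdAndResultId; Guava's Lists.partition is the same call the diff uses:

       import java.util.List;
       import java.util.stream.Collectors;
       import java.util.stream.IntStream;

       import com.google.common.collect.Lists;

       public class PartitionSizingSketch {
         public static void main(String[] args) {
           int total = 10;          // e.g. 10 batchId:resultId pairs from the bulk job
           int partitionNumber = 4; // source.max.number.of.partitions

           // integer ceiling of total/partitionNumber, the same trick used in the diff
           int sizeOfPartition = (total + partitionNumber - 1) / partitionNumber; // (10 + 3) / 4 = 3

           List<String> batchResultIds = IntStream.range(0, total)
               .mapToObj(i -> "batch" + i + ":result" + i)
               .collect(Collectors.toList());

           // consecutive sublists of sizes 3, 3, 3, 1 -- never more than partitionNumber of them
           List<List<String>> partitioned = Lists.partition(batchResultIds, sizeOfPartition);
           System.out.printf("max-parti=%d, size=%d, actual-parti=%d, total=%d%n",
               partitionNumber, sizeOfPartition, partitioned.size(), total);
         }
       }

   With this sizing, Lists.partition yields at most partitionNumber sublists, so source.max.number.of.partitions is an upper bound on the number of generated work units rather than an exact count.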
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services
