Github user ramkrish86 commented on a diff in the pull request:

    https://github.com/apache/phoenix/pull/12#discussion_r17411078
  
    --- Diff: 
phoenix-core/src/main/java/org/apache/phoenix/iterate/DefaultParallelIteratorRegionSplitter.java
 ---
    @@ -117,121 +125,59 @@ public boolean apply(HRegionLocation location) {
         }
     
         protected List<KeyRange> genKeyRanges(List<HRegionLocation> regions) {
    -        if (regions.isEmpty()) {
    -            return Collections.emptyList();
    +        if (regions.isEmpty()) { return Collections.emptyList(); }
    +        List<PColumnFamily> columnFamilies = 
this.tableRef.getTable().getColumnFamilies();
    +        // Collect all the guide posts across families. Sort them and then 
create a key range that starts 
    +        // from [] to [].  Then intersect it with the region boundary
    +        List<KeyRange> regionStartEndKey = 
Lists.newArrayListWithExpectedSize(regions.size());
    +        for (HRegionLocation region : regions) {
    +            
regionStartEndKey.add(KeyRange.getKeyRange(region.getRegionInfo().getStartKey(),
 region.getRegionInfo()
    +                    .getEndKey()));
             }
    -        
    -        StatsManager statsManager = 
context.getConnection().getQueryServices().getStatsManager();
    -        // the splits are computed as follows:
    -        //
    -        // let's suppose:
    -        // t = target concurrency
    -        // m = max concurrency
    -        // r = the number of regions we need to scan
    -        //
    -        // if r >= t:
    -        //    scan using regional boundaries
    -        // elif r > t/2:
    -        //    split each region in s splits such that:
    -        //    s = max(x) where s * x < m
    -        // else:
    -        //    split each region in s splits such that:
    -        //    s = max(x) where s * x < t
    -        //
    -        // The idea is to align splits with region boundaries. If rows are 
not evenly
    -        // distributed across regions, using this scheme compensates for 
regions that
    -        // have more rows than others, by applying tighter splits and 
therefore spawning
    -        // off more scans over the overloaded regions.
    -        int splitsPerRegion = getSplitsPerRegion(regions.size());
    -        // Create a multi-map of ServerName to List<KeyRange> which we'll 
use to round robin from to ensure
    -        // that we keep each region server busy for each query.
    -        ListMultimap<HRegionLocation,KeyRange> keyRangesPerRegion = 
ArrayListMultimap.create(regions.size(),regions.size() * splitsPerRegion);;
    -        if (splitsPerRegion == 1) {
    -            for (HRegionLocation region : regions) {
    -                keyRangesPerRegion.put(region, 
ParallelIterators.TO_KEY_RANGE.apply(region));
    -            }
    -        } else {
    -            // Maintain bucket for each server and then returns KeyRanges 
in round-robin
    -            // order to ensure all servers are utilized.
    -            for (HRegionLocation region : regions) {
    -                byte[] startKey = region.getRegionInfo().getStartKey();
    -                byte[] stopKey = region.getRegionInfo().getEndKey();
    -                boolean lowerUnbound = Bytes.compareTo(startKey, 
HConstants.EMPTY_START_ROW) == 0;
    -                boolean upperUnbound = Bytes.compareTo(stopKey, 
HConstants.EMPTY_END_ROW) == 0;
    -                /*
    -                 * If lower/upper unbound, get the min/max key from the 
stats manager.
    -                 * We use this as the boundary to split on, but we still 
use the empty
    -                 * byte as the boundary in the actual scan (in case our 
stats are out
    -                 * of date).
    -                 */
    -                if (lowerUnbound) {
    -                    startKey = statsManager.getMinKey(tableRef);
    -                    if (startKey == null) {
    -                        
keyRangesPerRegion.put(region,ParallelIterators.TO_KEY_RANGE.apply(region));
    -                        continue;
    -                    }
    -                }
    -                if (upperUnbound) {
    -                    stopKey = statsManager.getMaxKey(tableRef);
    -                    if (stopKey == null) {
    -                        
keyRangesPerRegion.put(region,ParallelIterators.TO_KEY_RANGE.apply(region));
    -                        continue;
    -                    }
    -                }
    -                
    -                byte[][] boundaries = null;
    -                // Both startKey and stopKey will be empty the first time
    -                if (Bytes.compareTo(startKey, stopKey) >= 0 || (boundaries 
= Bytes.split(startKey, stopKey, splitsPerRegion - 1)) == null) {
    -                    // Bytes.split may return null if the key space
    -                    // between start and end key is too small
    -                    
keyRangesPerRegion.put(region,ParallelIterators.TO_KEY_RANGE.apply(region));
    -                } else {
    -                    
keyRangesPerRegion.put(region,KeyRange.getKeyRange(lowerUnbound ? 
KeyRange.UNBOUND : boundaries[0], boundaries[1]));
    -                    if (boundaries.length > 1) {
    -                        for (int i = 1; i < boundaries.length-2; i++) {
    -                            
keyRangesPerRegion.put(region,KeyRange.getKeyRange(boundaries[i], true, 
boundaries[i+1], false));
    +        List<KeyRange> guidePosts = 
Lists.newArrayListWithCapacity(regions.size());
    +        List<byte[]> guidePostsBytes = 
Lists.newArrayListWithCapacity(regions.size());
    +        for (PColumnFamily fam : columnFamilies) {
    --- End diff --
    
    What you say is true.  If we collect guide posts across column families and 
the variations between them are small, we may end up creating more splits, but 
each split will be smaller in size.


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at [email protected] or file a JIRA ticket
with INFRA.
---

Reply via email to