[ 
https://issues.apache.org/jira/browse/PHOENIX-3007?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Krzysztof Bień updated PHOENIX-3007:
------------------------------------
    Description: 
When trying to write too many rows in one commit, the client suspends forever.

It occurs in both modes: for transactional and non transactional tables.
However:
When using a non-transactional table the data are successfully written on the 
server side after the expected amount of time (client suspends).
When using transactional table the data are never written on the server side 
(client suspends).

I suspect that this is connected with timing. When a transaction lasts longer 
than approx. 100/120 seconds it will suspend forever.

This is my tests for number of rows:
             | ROWS   | MODE       | RESULT | TIME         |
             | 100 000 | non trans  | OK          | 45 s          |
             | 200 000 | non trans  | FAIL        | suspends |
             | 10 000   | trans         | OK          |  21 s         |
             |  80 000   |  trans     | FAIL        | suspends |

I have tested it on many environments (cloudera 5.5 quickstart, tried also 
a newer version of zookeeper: 3.4.8).

Here is the test program:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.util.Properties;
import org.apache.phoenix.query.QueryServices;


/*
 
 SCHEMA:
 create table test_nontrans (id bigint primary key, val varchar);
 
 */

public class Main_non_trans {


                public static void main(String args[]) throws Exception {

                        
                        String MAX_MUTATIONS = "10000000";
                    Properties prop = new Properties();
                    prop.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB, 
MAX_MUTATIONS);
                    
prop.setProperty(QueryServices.INDEX_MUTATE_BATCH_SIZE_THRESHOLD_ATTRIB, 
MAX_MUTATIONS);
                        
                    
                    //connect
                    Class.forName("org.apache.phoenix.jdbc.PhoenixDriver");
                    Connection conn =  
DriverManager.getConnection("jdbc:phoenix:192.168.13.75:", prop);
                    conn.setAutoCommit(false);
                    System.out.println("got connection");
                    
                    
                    //write
                    final long start = System.currentTimeMillis();
                    PreparedStatement statement = conn.prepareStatement("upsert 
into test_nontrans(id, val) values(?, ?)");
  
                    
                    for(long i=1; i<=160000; i++)
                    {
                        statement.setLong(1, i);
                        statement.setString(2, "value " + i);
                        statement.execute();
                    }
                        
                    //commit
                    System.out.println("commiting...");
                    conn.commit();
                    
                    
                    System.out.println("Done in: " + 
(System.currentTimeMillis() - start)/1000.0 );
                    
                    
                  }     
}






  was:
When try to write too many rows in one commit the client suspends forever.

It occurs in both modes: for transactional and non transactional tables.
However:
When using non-transactional table the data are succsessfully written on the 
server side after expected amount of time (client suspends).
When using transactional table the data are never written on the server side 
(client suspends).

I suspect that is connected with the timing. When a transaction lasts longer 
than approx. 100/120 seconds it will suspend forever.

This is my tests for number of rows:
             | ROWS   | MODE       | RESULT | TIME         |
             | 100 000 | non trans  | OK          | 45 s          |
             | 200 000 | non trans  | FAIL        | suspends |
             | 10 000   | trans         | OK          |  21 s         |
             |  80 000   |  trans     | FAIL        | suspends |

I have  test in on many environments (cloudera 5.5 quickstart, tried also newer 
version of zookeeper: 3.4.8).

Here it is test program:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.util.Properties;
import org.apache.phoenix.query.QueryServices;


/*
 
 SCHEMA:
 create table test_nontrans (id bigint primary key, val varchar);
 
 */

public class Main_non_trans {


                public static void main(String args[]) throws Exception {

                        
                        String MAX_MUTATIONS = "10000000";
                    Properties prop = new Properties();
                    prop.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB, 
MAX_MUTATIONS);
                    
prop.setProperty(QueryServices.INDEX_MUTATE_BATCH_SIZE_THRESHOLD_ATTRIB, 
MAX_MUTATIONS);
                        
                    
                    //connect
                    Class.forName("org.apache.phoenix.jdbc.PhoenixDriver");
                    Connection conn =  
DriverManager.getConnection("jdbc:phoenix:192.168.13.75:", prop);
                    conn.setAutoCommit(false);
                    System.out.println("got connection");
                    
                    
                    //write
                    final long start = System.currentTimeMillis();
                    PreparedStatement statement = conn.prepareStatement("upsert 
into test_nontrans(id, val) values(?, ?)");
  
                    
                    for(long i=1; i<=160000; i++)
                    {
                        statement.setLong(1, i);
                        statement.setString(2, "value " + i);
                        statement.execute();
                    }
                        
                    //commit
                    System.out.println("commiting...");
                    conn.commit();
                    
                    
                    System.out.println("Done in: " + 
(System.currentTimeMillis() - start)/1000.0 );
                    
                    
                  }     
}







> Long-lasting writes suspends JDBC client forever.
> -------------------------------------------------
>
>                 Key: PHOENIX-3007
>                 URL: https://issues.apache.org/jira/browse/PHOENIX-3007
>             Project: Phoenix
>          Issue Type: Bug
>    Affects Versions: 4.7.0
>         Environment: client jdk7 windows
> Server:
>  Linux Ubuntu 14.0.4
>  hbase 1.1.4
>  phoenix 4.7.0
>            Reporter: Krzysztof Bień
>
> When try to write too many rows in one commit the client suspends forever.
> It occurs in both modes: for transactional and non transactional tables.
> However:
> When using non-transactional table the data are succsessfully written on the 
> server side after expected amount of time (client suspends).
> When using transactional table the data are never written on the server side 
> (client suspends).
> I suspect that is connected with the timing. When a transaction lasts longer 
> than approx. 100/120 seconds it will suspend forever.
> This is my tests for number of rows:
>              | ROWS   | MODE       | RESULT | TIME         |
>              | 100 000 | non trans  | OK          | 45 s          |
>            | 200 000 | non trans  | FAIL        | suspends |
>            | 10 000   | trans         | OK          |  21 s         |
>            |  80 000   |  trans     | FAIL        | suspends |
> I have  tested in on many environments (cloudera 5.5 quickstart, tried also 
> newer version of zookeeper: 3.4.8).
> Here it is test program:
> import java.sql.Connection;
> import java.sql.DriverManager;
> import java.sql.PreparedStatement;
> import java.util.Properties;
> import org.apache.phoenix.query.QueryServices;
> /*
>  
>  SCHEMA:
>  create table test_nontrans (id bigint primary key, val varchar);
>  
>  */
> public class Main_non_trans {
>               public static void main(String args[]) throws Exception {
>                       
>                       String MAX_MUTATIONS = "10000000";
>                   Properties prop = new Properties();
>                   prop.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB, 
> MAX_MUTATIONS);
>                   
> prop.setProperty(QueryServices.INDEX_MUTATE_BATCH_SIZE_THRESHOLD_ATTRIB, 
> MAX_MUTATIONS);
>                       
>                   
>                   //connect
>                   Class.forName("org.apache.phoenix.jdbc.PhoenixDriver");
>                   Connection conn =  
> DriverManager.getConnection("jdbc:phoenix:192.168.13.75:", prop);
>                   conn.setAutoCommit(false);
>                   System.out.println("got connection");
>                   
>                   
>                   //write
>                   final long start = System.currentTimeMillis();
>                   PreparedStatement statement = conn.prepareStatement("upsert 
> into test_nontrans(id, val) values(?, ?)");
>   
>                   
>                   for(long i=1; i<=160000; i++)
>                   {
>                       statement.setLong(1, i);
>                       statement.setString(2, "value " + i);
>                       statement.execute();
>                   }
>                       
>                   //commit
>                   System.out.println("commiting...");
>                   conn.commit();
>                   
>                   
>                   System.out.println("Done in: " + 
> (System.currentTimeMillis() - start)/1000.0 );
>                   
>                   
>                 }     
> }



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)

Reply via email to