This is an automated email from the ASF dual-hosted git repository. abudnikov pushed a commit to branch IGNITE-7595 in repository https://gitbox.apache.org/repos/asf/ignite.git
The following commit(s) were added to refs/heads/IGNITE-7595 by this push: new 32b69d5 add more pages 32b69d5 is described below commit 32b69d580e6867872b5d0fcce2c24950a985323c Author: abudnikov <abudni...@gridgain.com> AuthorDate: Fri Jul 17 19:04:19 2020 +0300 add more pages --- docs/README.adoc | 1 - docs/_data/toc.yaml | 53 +- .../code-snippets/dotnet/MemoryArchitecture.cs | 63 + docs/_docs/code-snippets/dotnet/SqlTransactions.cs | 86 + .../code-snippets/dotnet/WorkingWithEvents.cs | 167 ++ .../java/org/apache/ignite/snippets/Indexes.java | 143 + .../org/apache/ignite/snippets/Indexes_groups.java | 20 + .../apache/ignite/snippets/JDBCClientDriver.java | 67 + .../org/apache/ignite/snippets/JDBCThinDriver.java | 221 ++ .../main/java/org/apache/ignite/snippets/ODBC.java | 22 + .../apache/ignite/snippets/RESTConfiguration.java | 15 + .../java/org/apache/ignite/snippets/Schemas.java | 22 + docs/_docs/code-snippets/xml/binary-objects.xml | 38 + docs/_docs/code-snippets/xml/eviction.xml | 42 + .../_docs/code-snippets/xml/http-configuration.xml | 50 + docs/_docs/code-snippets/xml/jetty.xml | 53 + docs/_docs/code-snippets/xml/mvcc.xml | 30 + docs/_docs/code-snippets/xml/schemas.xml | 32 + docs/_docs/developers-guide/events/events.adoc | 350 +++ .../events/listening-to-events.adoc | 254 ++ .../memory-configuration/data-regions.adoc | 70 + .../memory-configuration/eviction-policies.adoc | 163 ++ .../memory-configuration/index.adoc | 5 + docs/_docs/developers-guide/restapi.adoc | 2820 ++++++++++++++++++++ docs/_docs/developers-guide/transactions/mvcc.adoc | 179 ++ 25 files changed, 4937 insertions(+), 29 deletions(-) diff --git a/docs/README.adoc b/docs/README.adoc index 1b4a562..185be3c 100644 --- a/docs/README.adoc +++ b/docs/README.adoc @@ -10,7 +10,6 @@ The Apache Ignite documentation is written in link:https://asciidoctor.org/docs/ The Asciidoc files are compiled into HTML pages and published to https://ignite.apache.org/docs. 
- .Content of the “docs” directory [cols="1,4",opts="stretch"] |=== diff --git a/docs/_data/toc.yaml b/docs/_data/toc.yaml index 50f1f99..3a25bd4 100644 --- a/docs/_data/toc.yaml +++ b/docs/_data/toc.yaml @@ -123,27 +123,27 @@ - title: Performing Transactions url: /developers-guide/key-value-api/transactions - title: Working with SQL -# items: -# - title: Introduction -# url: /developers-guide/SQL/sql-introduction -# - title: Understanding Schemas -# url: /developers-guide/SQL/schemas -# - title: Defining Indexes -# url: /developers-guide/SQL/indexes -# - title: Using SQL API -# url: /developers-guide/SQL/sql-api -# - title: Distributed Joins -# url: /developers-guide/SQL/distributed-joins -# - title: SQL Transactions -# url: /developers-guide/SQL/sql-transactions -# - title: Custom SQL Functions -# url: /developers-guide/SQL/custom-sql-func -# - title: JDBC Driver -# url: /developers-guide/SQL/JDBC/jdbc-driver -# - title: JDBC Client Driver -# url: /developers-guide/SQL/JDBC/jdbc-client-driver -# - title: Multiversion Concurrency Control -# url: /developers-guide/transactions/mvcc + items: + - title: Introduction + url: /developers-guide/SQL/sql-introduction + - title: Understanding Schemas + url: /developers-guide/SQL/schemas + - title: Defining Indexes + url: /developers-guide/SQL/indexes + - title: Using SQL API + url: /developers-guide/SQL/sql-api + - title: Distributed Joins + url: /developers-guide/SQL/distributed-joins + - title: SQL Transactions + url: /developers-guide/SQL/sql-transactions + - title: Custom SQL Functions + url: /developers-guide/SQL/custom-sql-func + - title: JDBC Driver + url: /developers-guide/SQL/JDBC/jdbc-driver + - title: JDBC Client Driver + url: /developers-guide/SQL/JDBC/jdbc-client-driver + - title: Multiversion Concurrency Control + url: /developers-guide/transactions/mvcc - title: SQL and Key-Value Usage url: /developers-guide/SQL/sql-key-value-storage - title: Distributed Computing @@ -168,17 +168,14 @@ items: - title: 
Enabling and Listenting to Events url: /developers-guide/events/listening-to-events -# - title: Events -# url: /developers-guide/events/events + - title: Events + url: /developers-guide/events/events - title: Near Caches url: /developers-guide/near-cache # - title: .NET Platform Cache # url: /developers-guide/platform-cache - title: Peer Class Loading url: /developers-guide/peer-class-loading -# - title: Handling Exceptions -# url: /developers-guide/handling-exceptions -# # - title: Thin Clients # items: # - title: Thin Clients Overview @@ -209,8 +206,8 @@ # url: /developers-guide/SQL/ODBC/data-types # - title: Error Codes # url: /developers-guide/SQL/ODBC/error-codes -# - title: REST API -# url: /developers-guide/restapi + - title: REST API + url: /developers-guide/restapi # # - title: Machine Learning # items: diff --git a/docs/_docs/code-snippets/dotnet/MemoryArchitecture.cs b/docs/_docs/code-snippets/dotnet/MemoryArchitecture.cs new file mode 100644 index 0000000..d804c21 --- /dev/null +++ b/docs/_docs/code-snippets/dotnet/MemoryArchitecture.cs @@ -0,0 +1,63 @@ +using Apache.Ignite.Core; +using Apache.Ignite.Core.Configuration; + +namespace dotnet_helloworld +{ + class MemoryArchitecture + { + public static void MemoryConfiguration() + { + // tag::mem[] + var cfg = new IgniteConfiguration + { + DataStorageConfiguration = new DataStorageConfiguration + { + DefaultDataRegionConfiguration = new DataRegionConfiguration + { + Name = "Default_Region", + InitialSize = 100 * 1024 * 1024 + }, + DataRegionConfigurations = new[] + { + new DataRegionConfiguration + { + Name = "40MB_Region_Eviction", + InitialSize = 20 * 1024 * 1024, + MaxSize = 40 * 1024 * 1024, + PageEvictionMode = DataPageEvictionMode.Random2Lru + }, + new DataRegionConfiguration + { + Name = "30MB_Region_Swapping", + InitialSize = 15 * 1024 * 1024, + MaxSize = 30 * 1024 * 1024, + SwapPath = "/path/to/swap/file" + } + } + } + }; + Ignition.Start(cfg); + // end::mem[] + } + + public static void 
DefaultDataReqion() + { + // tag::DefaultDataReqion[] + var cfg = new IgniteConfiguration + { + DataStorageConfiguration = new DataStorageConfiguration + { + DefaultDataRegionConfiguration = new DataRegionConfiguration + { + Name = "Default_Region", + InitialSize = 100 * 1024 * 1024 + } + } + }; + + // Start the node. + var ignite = Ignition.Start(cfg); + // end::DefaultDataReqion[] + } + } +} diff --git a/docs/_docs/code-snippets/dotnet/SqlTransactions.cs b/docs/_docs/code-snippets/dotnet/SqlTransactions.cs new file mode 100644 index 0000000..4116b9e --- /dev/null +++ b/docs/_docs/code-snippets/dotnet/SqlTransactions.cs @@ -0,0 +1,86 @@ +using System; +using Apache.Ignite.Core; +using Apache.Ignite.Core.Cache; +using Apache.Ignite.Core.Cache.Configuration; +using Apache.Ignite.Core.Discovery.Tcp; +using Apache.Ignite.Core.Discovery.Tcp.Static; + +namespace dotnet_helloworld +{ + public class SqlTransactions + { + public static void EnablingMvcc() + { + var ignite = Ignition.Start( + new IgniteConfiguration + { + DiscoverySpi = new TcpDiscoverySpi + { + LocalPort = 48500, + LocalPortRange = 20, + IpFinder = new TcpDiscoveryStaticIpFinder + { + Endpoints = new[] + { + "127.0.0.1:48500..48520" + } + } + } + }); + + // tag::mvcc[] + var cacheCfg = new CacheConfiguration + { + Name = "myCache", + AtomicityMode = CacheAtomicityMode.TransactionalSnapshot + }; + // end::mvcc[] + ignite.CreateCache<long, long>(cacheCfg); + Console.Write(typeof(Person)); + } + + public static void ConcurrentUpdates() + { + var cfg = new IgniteConfiguration + { + CacheConfiguration = new[] + { + new CacheConfiguration + { + Name = "mvccCache", + AtomicityMode = CacheAtomicityMode.TransactionalSnapshot + }, + } + }; + var ignite = Ignition.Start(cfg); + var cache = ignite.GetCache<int, string>("mvccCache"); + + // tag::mvccConcurrentUpdates[] + for (var i = 1; i <= 5; i++) + { + using (var tx = ignite.GetTransactions().TxStart()) + { + Console.WriteLine($"attempt #{i}, value: 
{cache.Get(1)}"); + try + { + cache.Put(1, "new value"); + tx.Commit(); + Console.WriteLine($"attempt #{i} succeeded"); + break; + } + catch (CacheException) + { + if (!tx.IsRollbackOnly) + { + // Transaction was not marked as "rollback only", + // so it's not a concurrent update issue. + // Process the exception here. + break; + } + } + } + } + // end::mvccConcurrentUpdates[] + } + } +} \ No newline at end of file diff --git a/docs/_docs/code-snippets/dotnet/WorkingWithEvents.cs b/docs/_docs/code-snippets/dotnet/WorkingWithEvents.cs new file mode 100644 index 0000000..8601fa0 --- /dev/null +++ b/docs/_docs/code-snippets/dotnet/WorkingWithEvents.cs @@ -0,0 +1,167 @@ +using System; +using Apache.Ignite.Core; +using Apache.Ignite.Core.Discovery.Tcp; +using Apache.Ignite.Core.Discovery.Tcp.Static; +using Apache.Ignite.Core.Events; + +namespace dotnet_helloworld +{ + public class WorkingWithEvents + { + public static void EnablingEvents() + { + //tag::enablingEvents[] + var cfg = new IgniteConfiguration + { + IncludedEventTypes = new[] + { + EventType.CacheObjectPut, + EventType.CacheObjectRead, + EventType.CacheObjectRemoved, + EventType.NodeJoined, + EventType.NodeLeft + } + }; + // end::enablingEvents[] + var discoverySpi = new TcpDiscoverySpi + { + LocalPort = 48500, + LocalPortRange = 20, + IpFinder = new TcpDiscoveryStaticIpFinder + { + Endpoints = new[] + { + "127.0.0.1:48500..48520" + } + } + }; + cfg.DiscoverySpi = discoverySpi; + // tag::enablingEvents[] + var ignite = Ignition.Start(cfg); + // end::enablingEvents[] + } + + public static void GettingEventsInterface1() + { + //tag::gettingEventsInterface1[] + var ignite = Ignition.GetIgnite(); + var events = ignite.GetEvents(); + //end::gettingEventsInterface1[] + } + + public static void GettingEventsInterface2() + { + //tag::gettingEventsInterface2[] + var ignite = Ignition.GetIgnite(); + var events = ignite.GetCluster().ForCacheNodes("person").GetEvents(); + //end::gettingEventsInterface2[] + } + + 
//tag::localListen[] + class LocalListener : IEventListener<CacheEvent> + { + public bool Invoke(CacheEvent evt) + { + Console.WriteLine("Received event [evt=" + evt.Name + ", key=" + evt.Key + ", oldVal=" + evt.OldValue + + ", newVal=" + evt.NewValue); + return true; + } + } + + public static void LocalListenDemo() + { + var cfg = new IgniteConfiguration + { + IncludedEventTypes = new[] + { + EventType.CacheObjectPut, + EventType.CacheObjectRead, + EventType.CacheObjectRemoved, + } + }; + //end::localListen[] + var discoverySpi = new TcpDiscoverySpi + { + LocalPort = 48500, + LocalPortRange = 20, + IpFinder = new TcpDiscoveryStaticIpFinder + { + Endpoints = new[] + { + "127.0.0.1:48500..48520" + } + } + }; + cfg.DiscoverySpi = discoverySpi; + // tag::localListen[] + var ignite = Ignition.Start(cfg); + var events = ignite.GetEvents(); + events.LocalListen(new LocalListener(), EventType.CacheObjectPut, EventType.CacheObjectRead, + EventType.CacheObjectRemoved); + + var cache = ignite.GetOrCreateCache<int, int>("myCache"); + cache.Put(1, 1); + cache.Put(2, 2); + } + //end::localListen[] + + + //tag::queryRemote[] + class EventFilter : IEventFilter<CacheEvent> + { + public bool Invoke(CacheEvent evt) + { + return true; + } + } + // .... 
+ + + //end::queryRemote[] + + + public static void StoringEventsDemo() + { + //tag::storingEvents[] + var cfg = new IgniteConfiguration + { + EventStorageSpi = new MemoryEventStorageSpi() + { + ExpirationTimeout = TimeSpan.FromMilliseconds(600000) + }, + IncludedEventTypes = new[] + { + EventType.CacheObjectPut, + EventType.CacheObjectRead, + EventType.CacheObjectRemoved, + } + }; + //end::storingEvents[] + var discoverySpi = new TcpDiscoverySpi + { + LocalPort = 48500, + LocalPortRange = 20, + IpFinder = new TcpDiscoveryStaticIpFinder + { + Endpoints = new[] + { + "127.0.0.1:48500..48520" + } + } + }; + cfg.DiscoverySpi = discoverySpi; + //tag::storingEvents[] + var ignite = Ignition.Start(cfg); + //end::storingEvents[] + //tag::queryLocal[] + //tag::queryRemote[] + var events = ignite.GetEvents(); + //end::queryRemote[] + var cacheEvents = events.LocalQuery(EventType.CacheObjectPut); + //end::queryLocal[] + //tag::queryRemote[] + var storedEvents = events.RemoteQuery(new EventFilter(), null, EventType.CacheObjectPut); + //end::queryRemote[] + } + } +} \ No newline at end of file diff --git a/docs/_docs/code-snippets/java/src/main/java/org/apache/ignite/snippets/Indexes.java b/docs/_docs/code-snippets/java/src/main/java/org/apache/ignite/snippets/Indexes.java new file mode 100644 index 0000000..b913432 --- /dev/null +++ b/docs/_docs/code-snippets/java/src/main/java/org/apache/ignite/snippets/Indexes.java @@ -0,0 +1,143 @@ +package org.apache.ignite.snippets; + +import java.io.Serializable; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.Set; + +import org.apache.ignite.Ignite; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.QueryEntity; +import org.apache.ignite.cache.QueryIndex; +import org.apache.ignite.cache.QueryIndexType; +import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.cache.query.annotations.QuerySqlField; +import 
org.apache.ignite.configuration.CacheConfiguration; + +public class Indexes { + + // tag::configuring-with-annotation[] + public class Person implements Serializable { + /** Indexed field. Will be visible to the SQL engine. */ + @QuerySqlField(index = true) + private long id; + + /** Queryable field. Will be visible to the SQL engine. */ + @QuerySqlField + private String name; + + /** Will NOT be visible to the SQL engine. */ + private int age; + + /** + * Indexed field sorted in descending order. Will be visible to the SQL engine. + */ + @QuerySqlField(index = true, descending = true) + private float salary; + } + // end::configuring-with-annotation[] + + public class Person2 implements Serializable { + // tag::annotation-with-inline-size[] + @QuerySqlField(index = true, inlineSize = 13) + private String country; + // end::annotation-with-inline-size[] + } + + void register() { + // tag::register-indexed-types[] + // Preparing configuration. + CacheConfiguration<Long, Person> ccfg = new CacheConfiguration<>(); + + // Registering indexed type. 
+ ccfg.setIndexedTypes(Long.class, Person.class); + // end::register-indexed-types[] + } + + void executeQuery() { + // tag::query[] + SqlFieldsQuery qry = new SqlFieldsQuery("SELECT id, name FROM Person" + "WHERE id > 1500 LIMIT 10"); + // end::query[] + } + + void withQueryEntities() { + // tag::index-using-queryentity[] + CacheConfiguration<Long, Person> cache = new CacheConfiguration<Long, Person>("myCache"); + + QueryEntity queryEntity = new QueryEntity(); + + queryEntity.setKeyFieldName("id").setKeyType(Long.class.getName()).setValueType(Person.class.getName()); + + LinkedHashMap<String, String> fields = new LinkedHashMap<>(); + fields.put("id", "java.lang.Long"); + fields.put("name", "java.lang.String"); + fields.put("salary", "java.lang.Long"); + + queryEntity.setFields(fields); + + queryEntity.setIndexes(Arrays.asList(new QueryIndex("name"), + new QueryIndex(Arrays.asList("id", "salary"), QueryIndexType.SORTED))); + + cache.setQueryEntities(Arrays.asList(queryEntity)); + + // end::index-using-queryentity[] + } + + void inline() { + + QueryEntity queryEntity = new QueryEntity(); + // tag::query-entity-with-inline-size[] + QueryIndex idx = new QueryIndex("country"); + idx.setInlineSize(13); + queryEntity.setIndexes(Arrays.asList(idx)); + // end::query-entity-with-inline-size[] + } + + void customKeys() { + Ignite ignite = Ignition.start(); + // tag::custom-key[] + // Preparing cache configuration. + CacheConfiguration<Long, Person> cacheCfg = new CacheConfiguration<Long, Person>("personCache"); + + // Creating the query entity. + QueryEntity entity = new QueryEntity("CustomKey", "Person"); + + // Listing all the queryable fields. 
+ LinkedHashMap<String, String> fields = new LinkedHashMap<>(); + + fields.put("intKeyField", Integer.class.getName()); + fields.put("strKeyField", String.class.getName()); + + fields.put("firstName", String.class.getName()); + fields.put("lastName", String.class.getName()); + + entity.setFields(fields); + + // Listing a subset of the fields that belong to the key. + Set<String> keyFlds = new HashSet<>(); + + keyFlds.add("intKeyField"); + keyFlds.add("strKeyField"); + + entity.setKeyFields(keyFlds); + + // End of new settings, nothing else here is DML related + + entity.setIndexes(Collections.<QueryIndex>emptyList()); + + cacheCfg.setQueryEntities(Collections.singletonList(entity)); + + ignite.createCache(cacheCfg); + + // end::custom-key[] + + } + + public static void main(String[] args) { + Indexes ind = new Indexes(); + + ind.withQueryEntities(); + } +} diff --git a/docs/_docs/code-snippets/java/src/main/java/org/apache/ignite/snippets/Indexes_groups.java b/docs/_docs/code-snippets/java/src/main/java/org/apache/ignite/snippets/Indexes_groups.java new file mode 100644 index 0000000..061ed72 --- /dev/null +++ b/docs/_docs/code-snippets/java/src/main/java/org/apache/ignite/snippets/Indexes_groups.java @@ -0,0 +1,20 @@ +package org.apache.ignite.snippets; + +import java.io.Serializable; + +import org.apache.ignite.cache.query.annotations.QuerySqlField; + +public class Indexes_groups { + + //tag::group-indexes[] + public class Person implements Serializable { + /** Indexed in a group index with "salary". */ + @QuerySqlField(orderedGroups = { @QuerySqlField.Group(name = "age_salary_idx", order = 0, descending = true) }) + private int age; + + /** Indexed separately and in a group index with "age". 
*/ + @QuerySqlField(index = true, orderedGroups = { @QuerySqlField.Group(name = "age_salary_idx", order = 3) }) + private double salary; + } + //end::group-indexes[] +} diff --git a/docs/_docs/code-snippets/java/src/main/java/org/apache/ignite/snippets/JDBCClientDriver.java b/docs/_docs/code-snippets/java/src/main/java/org/apache/ignite/snippets/JDBCClientDriver.java new file mode 100644 index 0000000..42da3da --- /dev/null +++ b/docs/_docs/code-snippets/java/src/main/java/org/apache/ignite/snippets/JDBCClientDriver.java @@ -0,0 +1,67 @@ +package org.apache.ignite.snippets; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; + +import org.junit.jupiter.api.Test; + +public class JDBCClientDriver { + + @Test + void registerDriver() throws ClassNotFoundException, SQLException { + //tag::register[] + // Registering the JDBC driver. + Class.forName("org.apache.ignite.IgniteJdbcDriver"); + + // Opening JDBC connection (cache name is not specified, which means that we use default cache). + Connection conn = DriverManager.getConnection("jdbc:ignite:cfg://config/ignite-jdbc.xml"); + + //end::register[] + conn.close(); + } + + @Test + void streaming() throws ClassNotFoundException, SQLException { + // Register JDBC driver. + Class.forName("org.apache.ignite.IgniteJdbcDriver"); + + // Opening connection in the streaming mode. + Connection conn = DriverManager + .getConnection("jdbc:ignite:cfg://cache=myCache:streaming=true@file://config/ignite-jdbc.xml"); + + conn.close(); + } + + @Test + void timeBasedFlushing() throws ClassNotFoundException, SQLException { + //tag::time-based-flushing[] + // Register JDBC driver. + Class.forName("org.apache.ignite.IgniteJdbcDriver"); + + // Opening a connection in the streaming mode and time based flushing set. 
+ Connection conn = DriverManager.getConnection("jdbc:ignite:cfg://streaming=true:streamingFlushFrequency=1000@file:///etc/config/ignite-jdbc.xml"); + + PreparedStatement stmt = conn.prepareStatement( + "INSERT INTO Person(_key, name, age) VALUES(CAST(? as BIGINT), ?, ?)"); + + // Adding the data. + for (int i = 1; i < 100000; i++) { + // Inserting a Person object with a Long key. + stmt.setInt(1, i); + stmt.setString(2, "John Smith"); + stmt.setInt(3, 25); + + stmt.execute(); + } + + conn.close(); + + // Beyond this point, all data is guaranteed to be flushed into the cache. + + //end::time-based-flushing[] + } + +} diff --git a/docs/_docs/code-snippets/java/src/main/java/org/apache/ignite/snippets/JDBCThinDriver.java b/docs/_docs/code-snippets/java/src/main/java/org/apache/ignite/snippets/JDBCThinDriver.java new file mode 100644 index 0000000..e19d2ea --- /dev/null +++ b/docs/_docs/code-snippets/java/src/main/java/org/apache/ignite/snippets/JDBCThinDriver.java @@ -0,0 +1,221 @@ +package org.apache.ignite.snippets; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; + +import org.apache.ignite.IgniteJdbcThinDataSource; + +public class JDBCThinDriver { + + Connection getConnection() throws ClassNotFoundException, SQLException { + + // tag::get-connection[] + // Register JDBC driver. + Class.forName("org.apache.ignite.IgniteJdbcThinDriver"); + + // Open the JDBC connection. + Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1"); + + // end::get-connection[] + return conn; + } + + void multipleEndpoints() throws ClassNotFoundException, SQLException { + // tag::multiple-endpoints[] + + // Register JDBC Driver. + Class.forName("org.apache.ignite.IgniteJdbcThinDriver"); + + // Open the JDBC connection passing several connection endpoints. 
+ Connection conn = DriverManager + .getConnection("jdbc:ignite:thin://192.168.0.50:101,192.188.5.40:101,192.168.10.230:101"); + // end::multiple-endpoints[] + + } + + public Connection connectionFromDatasource() throws SQLException { + // tag::connection-from-data-source[] + // Or open connection via DataSource. + IgniteJdbcThinDataSource ids = new IgniteJdbcThinDataSource(); + ids.setUrl("jdbc:ignite:thin://127.0.0.1"); + ids.setDistributedJoins(true); + + Connection conn = ids.getConnection(); + // end::connection-from-data-source[] + + return conn; + } + + void select() throws ClassNotFoundException, SQLException { + + Connection conn = getConnection(); + + // tag::select[] + // Query people with specific age using prepared statement. + PreparedStatement stmt = conn.prepareStatement("select name, age from Person where age = ?"); + + stmt.setInt(1, 30); + + ResultSet rs = stmt.executeQuery(); + + while (rs.next()) { + String name = rs.getString("name"); + int age = rs.getInt("age"); + // ... + } + // end::select[] + conn.close(); + } + + void insert() throws ClassNotFoundException, SQLException { + + Connection conn = getConnection(); + // tag::insert[] + // Insert a Person with a Long key. + PreparedStatement stmt = conn + .prepareStatement("INSERT INTO Person(_key, name, age) VALUES(CAST(? as BIGINT), ?, ?)"); + + stmt.setInt(1, 1); + stmt.setString(2, "John Smith"); + stmt.setInt(3, 25); + + stmt.execute(); + // end::insert[] + conn.close(); + } + + void merge() throws ClassNotFoundException, SQLException { + + Connection conn = getConnection(); + // tag::merge[] + // Merge a Person with a Long key. + PreparedStatement stmt = conn + .prepareStatement("MERGE INTO Person(_key, name, age) VALUES(CAST(? 
as BIGINT), ?, ?)"); + + stmt.setInt(1, 1); + stmt.setString(2, "John Smith"); + stmt.setInt(3, 25); + + stmt.executeUpdate(); + // end::merge[] + conn.close(); + } + + void partitionAwareness() throws ClassNotFoundException, SQLException { + + // tag::partition-awareness[] + Class.forName("org.apache.ignite.IgniteJdbcThinDriver"); + + Connection conn = DriverManager + .getConnection("jdbc:ignite:thin://192.168.0.50,192.188.5.40,192.168.10.230?partitionAwareness=true"); + // end::partition-awareness[] + + conn.close(); + } + + void handleException() throws ClassNotFoundException, SQLException { + + Connection conn = getConnection(); + // tag::handle-exception[] + PreparedStatement ps; + + try { + ps = conn.prepareStatement("INSERT INTO Person(id, name, age) values (1, 'John', 'unparseableString')"); + } catch (SQLException e) { + switch (e.getSQLState()) { + case "0700B": + System.out.println("Conversion failure"); + break; + + case "42000": + System.out.println("Parsing error"); + break; + + default: + System.out.println("Unprocessed error: " + e.getSQLState()); + break; + } + } + // end::handle-exception[] + } + + void ssl() throws ClassNotFoundException, SQLException { + + //tag::ssl[] + Class.forName("org.apache.ignite.IgniteJdbcThinDriver"); + + String keyStore = "keystore/node.jks"; + String keyStorePassword = "123456"; + + String trustStore = "keystore/trust.jks"; + String trustStorePassword = "123456"; + + try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?sslMode=require" + + "&sslClientCertificateKeyStoreUrl=" + keyStore + "&sslClientCertificateKeyStorePassword=" + + keyStorePassword + "&sslTrustCertificateKeyStoreUrl=" + trustStore + + "&sslTrustCertificateKeyStorePassword=" + trustStorePassword)) { + + ResultSet rs = conn.createStatement().executeQuery("select 10"); + rs.next(); + System.out.println(rs.getInt(1)); + } catch (Exception e) { + e.printStackTrace(); + } + + //end::ssl[] + + } + + void errorCodes() throws 
ClassNotFoundException, SQLException { + //tag::error-codes[] + // Register JDBC driver. + Class.forName("org.apache.ignite.IgniteJdbcThinDriver"); + + // Open JDBC connection. + Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1"); + + PreparedStatement ps; + + try { + ps = conn.prepareStatement("INSERT INTO Person(id, name, age) values (1," + "'John', 'unparseableString')"); + } catch (SQLException e) { + switch (e.getSQLState()) { + case "0700B": + System.out.println("Conversion failure"); + break; + + case "42000": + System.out.println("Parsing error"); + break; + + default: + System.out.println("Unprocessed error: " + e.getSQLState()); + break; + } + } + + //end::error-codes[] + } + + public static void main(String[] args) throws Exception { + // Ignite ignite = Util.startNode(); + try { + JDBCThinDriver j = new JDBCThinDriver(); + + // j.getConnection(); + // j.multipleEndpoints(); + // j.connectionFromDatasource(); + // j.partitionAwareness(); + + j.ssl(); + } catch (Exception e) { + e.printStackTrace(); + System.exit(1); + } finally { + // ignite.close(); + } + } +} diff --git a/docs/_docs/code-snippets/java/src/main/java/org/apache/ignite/snippets/ODBC.java b/docs/_docs/code-snippets/java/src/main/java/org/apache/ignite/snippets/ODBC.java new file mode 100644 index 0000000..27a5a61 --- /dev/null +++ b/docs/_docs/code-snippets/java/src/main/java/org/apache/ignite/snippets/ODBC.java @@ -0,0 +1,22 @@ +package org.apache.ignite.snippets; + +import org.apache.ignite.configuration.ClientConnectorConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; + +public class ODBC { + + void enableODBC() { + IgniteConfiguration cfg = new IgniteConfiguration(); + ClientConnectorConfiguration clientConnectorCfg = new ClientConnectorConfiguration(); + + clientConnectorCfg.setHost("127.0.0.1"); + clientConnectorCfg.setPort(12345); + clientConnectorCfg.setPortRange(2); + clientConnectorCfg.setMaxOpenCursorsPerConnection(512); + 
clientConnectorCfg.setSocketSendBufferSize(65536); + clientConnectorCfg.setSocketReceiveBufferSize(131072); + clientConnectorCfg.setThreadPoolSize(4); + + cfg.setClientConnectorConfiguration(clientConnectorCfg); + } +} diff --git a/docs/_docs/code-snippets/java/src/main/java/org/apache/ignite/snippets/RESTConfiguration.java b/docs/_docs/code-snippets/java/src/main/java/org/apache/ignite/snippets/RESTConfiguration.java new file mode 100644 index 0000000..a40beaa --- /dev/null +++ b/docs/_docs/code-snippets/java/src/main/java/org/apache/ignite/snippets/RESTConfiguration.java @@ -0,0 +1,15 @@ +package org.apache.ignite.snippets; + +import org.apache.ignite.configuration.ConnectorConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; + +public class RESTConfiguration { + + void config() { + //tag::http-configuration[] + IgniteConfiguration cfg = new IgniteConfiguration(); + cfg.setConnectorConfiguration(new ConnectorConfiguration().setJettyPath("jetty.xml")); + //end::http-configuration[] + } + +} diff --git a/docs/_docs/code-snippets/java/src/main/java/org/apache/ignite/snippets/Schemas.java b/docs/_docs/code-snippets/java/src/main/java/org/apache/ignite/snippets/Schemas.java new file mode 100644 index 0000000..bc3e4f0 --- /dev/null +++ b/docs/_docs/code-snippets/java/src/main/java/org/apache/ignite/snippets/Schemas.java @@ -0,0 +1,22 @@ +package org.apache.ignite.snippets; + +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.configuration.SqlConfiguration; +import org.junit.jupiter.api.Test; + +public class Schemas { + + @Test + void config() { + //tag::custom-schemas[] + IgniteConfiguration cfg = new IgniteConfiguration(); + + SqlConfiguration sqlCfg = new SqlConfiguration(); + + sqlCfg.setSqlSchemas("sqlSchemas"); + + cfg.setSqlConfiguration(sqlCfg); + + //end::custom-schemas[] + } +} diff --git a/docs/_docs/code-snippets/xml/binary-objects.xml b/docs/_docs/code-snippets/xml/binary-objects.xml new file mode 
100644 index 0000000..97b315b --- /dev/null +++ b/docs/_docs/code-snippets/xml/binary-objects.xml @@ -0,0 +1,38 @@ +<?xml version="1.0" encoding="UTF-8"?> +<beans xmlns="http://www.springframework.org/schema/beans" xmlns:util="http://www.springframework.org/schema/util" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation=" http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd http://www.springframework.org/schema/util http://www.springframework.org/schema/util/spring-util.xsd"> + <!-- tag::ignite-config[] --> + <bean class="org.apache.ignite.configuration.IgniteConfiguration"> + + <property name="binaryConfiguration"> + <bean class="org.apache.ignite.configuration.BinaryConfiguration"> + <property name="nameMapper" ref="globalNameMapper"/> + <property name="idMapper" ref="globalIdMapper"/> + <property name="typeConfigurations"> + <list> + <bean class="org.apache.ignite.binary.BinaryTypeConfiguration"> + <property name="typeName" value="org.apache.ignite.examples.*"/> + <property name="serializer" ref="exampleSerializer"/> + </bean> + </list> + </property> + </bean> + </property> + + <!-- tag::discovery[] --> + <property name="discoverySpi"> + <bean class="org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi"> + <property name="ipFinder"> + <bean class="org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder"> + <property name="addresses"> + <list> + <value>127.0.0.1:47500..47509</value> + </list> + </property> + </bean> + </property> + </bean> + </property> + <!-- end::discovery[] --> + </bean> + <!-- end::ignite-config[] --> +</beans> \ No newline at end of file diff --git a/docs/_docs/code-snippets/xml/eviction.xml b/docs/_docs/code-snippets/xml/eviction.xml new file mode 100644 index 0000000..01eb394 --- /dev/null +++ b/docs/_docs/code-snippets/xml/eviction.xml @@ -0,0 +1,42 @@ +<?xml version="1.0" encoding="UTF-8"?> +<beans xmlns="http://www.springframework.org/schema/beans" 
xmlns:util="http://www.springframework.org/schema/util" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation=" http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd http://www.springframework.org/schema/util http://www.springframework.org/schema/util/spring-util.xsd"> + <!-- tag::ignite-config[] --> + <bean class="org.apache.ignite.configuration.IgniteConfiguration"> + <!-- Memory configuration. --> + <property name="dataStorageConfiguration"> + <bean class="org.apache.ignite.configuration.DataStorageConfiguration"> + <property name="dataRegionConfigurations"> + <list> + <!-- Defining a data region that will consume up to 20 GB of RAM. --> + <bean class="org.apache.ignite.configuration.DataRegionConfiguration"> + <!-- Custom region name. --> + <property name="name" value="20GB_Region"/> + <!-- 500 MB initial size (RAM). --> + <property name="initialSize" value="#{500L * 1024 * 1024}"/> + <!-- 20 GB maximum size (RAM). --> + <property name="maxSize" value="#{20L * 1024 * 1024 * 1024}"/> + <!-- Enabling RANDOM_LRU eviction for this region. 
--> + <property name="pageEvictionMode" value="RANDOM_LRU"/> + </bean> + </list> + </property> + </bean> + </property> + <!-- tag::discovery[] --> + <property name="discoverySpi"> + <bean class="org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi"> + <property name="ipFinder"> + <bean class="org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder"> + <property name="addresses"> + <list> + <value>127.0.0.1:47500..47509</value> + </list> + </property> + </bean> + </property> + </bean> + </property> + <!-- end::discovery[] --> + </bean> + <!-- end::ignite-config[] --> +</beans> \ No newline at end of file diff --git a/docs/_docs/code-snippets/xml/http-configuration.xml b/docs/_docs/code-snippets/xml/http-configuration.xml new file mode 100644 index 0000000..294488c --- /dev/null +++ b/docs/_docs/code-snippets/xml/http-configuration.xml @@ -0,0 +1,50 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + --> +<!-- + Ignite configuration with all defaults and enabled p2p deployment and enabled events. 
+ --> +<beans xmlns="http://www.springframework.org/schema/beans" xmlns:util="http://www.springframework.org/schema/util" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation=" http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd http://www.springframework.org/schema/util http://www.springframework.org/schema/util/spring-util.xsd"> + <!-- tag::ignite-config[] --> + <bean class="org.apache.ignite.configuration.IgniteConfiguration" id="ignite.cfg"> + <!-- tag::http-configuration[] --> + <property name="connectorConfiguration"> + <bean class="org.apache.ignite.configuration.ConnectorConfiguration"> + <property name="jettyPath" value="jetty.xml"/> + </bean> + </property> + <!-- end::http-configuration[] --> + <!-- tag::discovery[] --> + <property name="discoverySpi"> + <bean class="org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi"> + <property name="ipFinder"> + <bean class="org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder"> + <property name="addresses"> + <list> + <!-- In distributed environment, replace with actual host IP address. 
--> + <value>127.0.0.1:47500..47509</value> + </list> + </property> + </bean> + </property> + </bean> + </property> + <!-- end::discovery[] --> + </bean> + + <!-- end::ignite-config[] --> +</beans> \ No newline at end of file diff --git a/docs/_docs/code-snippets/xml/jetty.xml b/docs/_docs/code-snippets/xml/jetty.xml new file mode 100644 index 0000000..0ee1e8a --- /dev/null +++ b/docs/_docs/code-snippets/xml/jetty.xml @@ -0,0 +1,53 @@ +<?xml version="1.0"?> +<!DOCTYPE Configure PUBLIC "-//Jetty//Configure//EN" "http://www.eclipse.org/jetty/configure.dtd"> +<Configure id="Server" class="org.eclipse.jetty.server.Server"> + <Arg name="threadPool"> + <!-- Default queued blocking thread pool --> + <New class="org.eclipse.jetty.util.thread.QueuedThreadPool"> + <Set name="minThreads">20</Set> + <Set name="maxThreads">200</Set> + </New> + </Arg> + <New id="httpCfg" class="org.eclipse.jetty.server.HttpConfiguration"> + <Set name="secureScheme">https</Set> + <Set name="securePort">8443</Set> + <Set name="sendServerVersion">true</Set> + <Set name="sendDateHeader">true</Set> + </New> + <Call name="addConnector"> + <Arg> + <New class="org.eclipse.jetty.server.ServerConnector"> + <Arg name="server"><Ref refid="Server"/></Arg> + <Arg name="factories"> + <Array type="org.eclipse.jetty.server.ConnectionFactory"> + <Item> + <New class="org.eclipse.jetty.server.HttpConnectionFactory"> + <Ref refid="httpCfg"/> + </New> + </Item> + </Array> + </Arg> + <Set name="host"> + <SystemProperty name="IGNITE_JETTY_HOST" default="localhost"/> + </Set> + <Set name="port"> + <SystemProperty name="IGNITE_JETTY_PORT" default="8080"/> + </Set> + <Set name="idleTimeout">30000</Set> + <Set name="reuseAddress">true</Set> + </New> + </Arg> + </Call> + <Set name="handler"> + <New id="Handlers" class="org.eclipse.jetty.server.handler.HandlerCollection"> + <Set name="handlers"> + <Array type="org.eclipse.jetty.server.Handler"> + <Item> + <New id="Contexts" 
class="org.eclipse.jetty.server.handler.ContextHandlerCollection"/> + </Item> + </Array> + </Set> + </New> + </Set> + <Set name="stopAtShutdown">false</Set> +</Configure> diff --git a/docs/_docs/code-snippets/xml/mvcc.xml b/docs/_docs/code-snippets/xml/mvcc.xml new file mode 100644 index 0000000..a2e6fb7 --- /dev/null +++ b/docs/_docs/code-snippets/xml/mvcc.xml @@ -0,0 +1,30 @@ +<?xml version="1.0" encoding="UTF-8"?> +<beans xmlns="http://www.springframework.org/schema/beans" xmlns:util="http://www.springframework.org/schema/util" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation=" http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd http://www.springframework.org/schema/util http://www.springframework.org/schema/util/spring-util.xsd"> + <!-- tag::ignite-config[] --> + <bean class="org.apache.ignite.configuration.IgniteConfiguration"> + + <property name="cacheConfiguration"> + <bean class="org.apache.ignite.configuration.CacheConfiguration"> + <property name="name" value="myCache"/> + <property name="atomicityMode" value="TRANSACTIONAL_SNAPSHOT"/> + </bean> + </property> + + <!-- tag::discovery[] --> + <property name="discoverySpi"> + <bean class="org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi"> + <property name="ipFinder"> + <bean class="org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder"> + <property name="addresses"> + <list> + <value>127.0.0.1:47500..47509</value> + </list> + </property> + </bean> + </property> + </bean> + </property> + <!-- end::discovery[] --> + </bean> + <!-- end::ignite-config[] --> +</beans> \ No newline at end of file diff --git a/docs/_docs/code-snippets/xml/schemas.xml b/docs/_docs/code-snippets/xml/schemas.xml new file mode 100644 index 0000000..bbbf923 --- /dev/null +++ b/docs/_docs/code-snippets/xml/schemas.xml @@ -0,0 +1,32 @@ +<?xml version="1.0" encoding="UTF-8"?> +<beans xmlns="http://www.springframework.org/schema/beans" 
xmlns:util="http://www.springframework.org/schema/util" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation=" http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd http://www.springframework.org/schema/util http://www.springframework.org/schema/util/spring-util.xsd"> + <!-- tag::ignite-config[] --> + <bean class="org.apache.ignite.configuration.IgniteConfiguration"> + <property name="sqlConfiguration"> + <bean class="org.apache.ignite.configuration.SqlConfiguration"> + <property name="sqlSchemas"> + <list> + <value>MY_SCHEMA</value> + <value>MY_SECOND_SCHEMA</value> + </list> + </property> + </bean> + </property> + <!-- tag::discovery[] --> + <property name="discoverySpi"> + <bean class="org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi"> + <property name="ipFinder"> + <bean class="org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder"> + <property name="addresses"> + <list> + <value>127.0.0.1:47500..47509</value> + </list> + </property> + </bean> + </property> + </bean> + </property> + <!-- end::discovery[] --> + </bean> + <!-- end::ignite-config[] --> +</beans> \ No newline at end of file diff --git a/docs/_docs/developers-guide/events/events.adoc b/docs/_docs/developers-guide/events/events.adoc new file mode 100644 index 0000000..64c22a0 --- /dev/null +++ b/docs/_docs/developers-guide/events/events.adoc @@ -0,0 +1,350 @@ += Events +:javaFile: {javaCodeDir}/Events.java + +:events_url: {javadoc_base_url}/org/apache/ignite/events + + + +This page describes different event types, when and where they are generated, and how you can use them. + +You can always find the most complete and up to date list of events in the javadocs: link:{javadoc_base_url}/org/apache/ignite/events/EventType.html[Ignite events, window=_blank]. + + +== General Information + +All events implement the `Event` interface. 
+You may want to cast each event to the specific class to get extended information about the action the event was triggered by. +For example, the 'cache update' action triggers an event that is an instance of the `CacheEvent` class, which contains the information about the data that was modified, the ID of the subject that triggered the event, etc. + + +All events contain information about the node where the event was generated. +For example, when you execute an `IgniteClosure` job, the `EVT_JOB_STARTED` and `EVT_JOB_FINISHED` events contain the information about the node where the closure was executed. + +[source, java] +---- +include::{javaFile}[tags=get-node,indent=0] +---- +//// +When an event is generated by another event, the second event will contain the information about the first event. +For example, the cache rebalancing event can be triggered by +The rebalancing event will contain the information about the cause +//// + + +[CAUTION] +==== +[discrete] +=== Event Ordering + +The order of events in the event listener is not guaranteed to be the same as the order in which they were generated. + +==== + +=== SubjectID + +Some events contain the `subjectID` field, which represents the ID of the entity that initiated the action: + +* When the action is initiated by a server or client node, the `subjectID` is the ID of that node. +* When the action is done by a thin client, JDBC/ODBC/REST client, the `subjectID` is generated when the client connects to the cluster and remains the same as long as the client is connected to the cluster. + +Check the specific event class to learn if the `subjectID` field is present. + +This capability can be used for link:administrators-guide/security/auditing-events[auditing purposes]. + + +//// +== Node Lifecycle Events + +Refer to the lifecycle section. + +[cols="2,5",opts="header"] +|=== +|Event Type | Event Description +|BEFORE_NODE_START +|Invoked before Ignite node startup routine is initiated. 
+ +|AFTER_NODE_START +|Invoked right after Ignite node has started. + +|BEFORE_NODE_STOP +|Invoked right before Ignite stop routine is initiated. + +|AFTER_NODE_STOP +|Invoked right after Ignite node has stopped. +|=== +//// + +== Cluster Activation Events + +Cluster activation events are instances of the link:{events_url}/ClusterActivationEvent.html[ClusterActivationEvent, window=_blank] class. +Cluster activation events are generated when the cluster is activated or deactivated. They contain the list of baseline nodes at the moment of the event. + +[cols="2,5,3",opts="header"] +|=== +|Event Type | Event Description | Where Event Is Fired +|EVT_CLUSTER_ACTIVATED | The cluster is activated (including when auto-activated). | All cluster nodes. +|EVT_CLUSTER_DEACTIVATED | The cluster is deactivated. | All cluster nodes. +|=== + +== Cache Lifecycle Events + +Cache Lifecycle events are instances of the link:{events_url}/CacheEvent.html[CacheEvent, window=_blank] class. +Each cache lifecycle event is associated with a specific cache and has a field that contains the name of the cache. + +[cols="2,5,3",opts="header"] +|=== +| Event Type | Event Description | Where Event Is Fired +| EVT_CACHE_STARTED +a| A cache is started on a specific node. +Each server node holds an internal instance of a cache. +This event is fired when the instance is created, which includes the following actions: + +* A cluster with existing caches is activated. The event is generated for every cache on all server nodes where the cache is configured. +* A server node joins the cluster with existing caches (the caches are started on that node). +* When you create a new cache dynamically by calling `Ignite.getOrCreateCache(...)` or similar methods. The event is fired on all nodes that host the cache. +* When you obtain an instance of a cache on a client node. +* When you create a cache via the link:sql-reference/ddl#create-table[CREATE TABLE] command. + + +| All nodes where the cache is started. 
+| EVT_CACHE_STOPPED a| This event happens when a cache is stopped, which includes the following actions: + +* The cluster is deactivated. All caches on all server nodes are stopped. +* `IgniteCache.close()` is called. The event is triggered on the node where the method is called. +* A SQL table is dropped. +* If you call `cache = Ignite.getOrCreateCache(...)` and then call `Ignite.close()`, the `cache` is also closed on that node. + +|All nodes where the cache is stopped. + +| EVT_CACHE_NODES_LEFT | All nodes that host a specific cache have left the cluster. This can happen when a cache is deployed on a subset of server nodes or when all server nodes leave the cluster and only client nodes remain. | All remaining nodes. +|=== + + +== Cache Events +Cache events are instances of the link:{events_url}/CacheEvent.html[CacheEvent] class and +represent the operations on cache objects, such as 'get', 'put', 'remove', 'lock', etc. + +Each event contains the information about the cache, the key that is accessed by the operation, the value before and after the operation (if applicable), etc. + +Cache events are also generated when you use DML commands. + + +[cols="2,5,3",opts="header"] +|=== +| Event Type | Event Description | Where Event Is Fired +| EVT_CACHE_OBJECT_PUT | An object is put to a cache. This event is fired for every invocation of `IgniteCache.put()`. The bulk operations, such as `putAll(...)`, produce multiple events of this type. + +| The primary and backup nodes for the entry. + +| EVT_CACHE_OBJECT_READ +| An object is read from a cache. +This event is not emitted when you use link:developers-guide/key-value-api/using-scan-queries[scan queries] (use <<Cache Query Events>> to monitor scan queries). +| The node where read operation is executed. +It can be either the primary or backup node (the latter case is only possible when link:developers-guide/configuring-caches/configuration-overview#readfrombackup[reading from backups is enabled]). 
+In transactional caches, the event can be generated on both the primary and backup nodes depending on the concurrency and isolation levels. + +| EVT_CACHE_OBJECT_REMOVED | An object is removed from a cache. |The primary and backup nodes for the entry. + +| EVT_CACHE_OBJECT_LOCKED +a| A lock is acquired on a specific key. +Locks can be acquired only on keys in transactional caches. +User actions that acquire a lock include the following cases: + +* The user explicitly acquires a lock by calling `IgniteCache.lock()` or `IgniteCache.lockAll()`. +* A lock is acquired for every atomic (non-transactional) data modifying operation (put, update, remove). +In this case, the event is triggered on both primary and backup nodes for the key. +* Locks are acquired on the keys accessed within a transaction (depending on the link:developers-guide/key-value-api/transactions#concurrency-modes-and-isolation-levels[concurrency and isolation levels]). + +| The primary or/and backup nodes for the entry depending on the link:developers-guide/key-value-api/transactions#concurrency-modes-and-isolation-levels[concurrency and isolation levels]. + +| EVT_CACHE_OBJECT_UNLOCKED | A lock on a key is released. | The primary node for the entry. + +| EVT_CACHE_OBJECT_EXPIRED | The event is fired when a cache entry expires. This happens only if an link:developers-guide/configuring-caches/expiry-policies[expiry policy] is configured. | The primary and backup nodes for the entry. +| EVT_CACHE_ENTRY_CREATED | This event is triggered when Ignite creates an internal entry for working with a specific object from a cache. We don't recommend using this event. If you want to monitor cache put operations, the `EVT_CACHE_OBJECT_PUT` event should be enough for most cases. | The primary and backup nodes for the entry. + +| EVT_CACHE_ENTRY_DESTROYED +| This event is triggered when Ignite destroys an internal entry that was created for working with a specific object from a cache. +We don't recommend using it. 
+Destroying the internal entry does not remove any data from the cache. +If you want to monitor cache remove operations, use the `EVT_CACHE_OBJECT_REMOVED` event. +| The primary and backup nodes for the entry. +|=== + +== Cache Query Events + +There are two types of events that are related to cache queries: + +* Cache query object read events, which are instances of the link:{events_url}/CacheQueryReadEvent.html[CacheQueryReadEvent, window=_blank] class. +* Cache query executed events, which are instances of the link:{events_url}/CacheQueryExecutedEvent.html[CacheQueryExecutedEvent, window=_blank] class. + + +[cols="2,5,3",opts="header"] +|=== +| Event Type | Event Description | Where Event Is Fired +| EVT_CACHE_QUERY_OBJECT_READ | An object is read as part of a query execution. This event is generated for every object that matches the query filter. | The primary node of the object that is read. +| EVT_CACHE_QUERY_EXECUTED | This event is generated when a query is executed. | All server nodes that host the cache. +|=== + +//// +== Checkpointing Events + +Related to checkpointingspi in map-reduce + +Checkpointing events are instances of the link:{events_url}/CheckpointEvent.html[CheckpointEvent] class. + +[cols="2,5,3",opts="header"] +|=== +| Event Type | Event Description | Where Event Is Fired +| EVT_CHECKPOINT_LOADED | | The node +| EVT_CHECKPOINT_REMOVED | | +| EVT_CHECKPOINT_SAVED | | +|=== +//// + +== Class and Task Deployment Events + +Deployment events are instances of the link:{events_url}/DeploymentEvent.html[DeploymentEvent] class. + +[cols="2,5,3",opts="header"] +|=== +| Event Type | Event Description | Where Event Is Fired +| EVT_CLASS_DEPLOYED | A class (non-task) is deployed on a specific node. | The node where the class is deployed. +| EVT_CLASS_UNDEPLOYED | A class is undeployed. | The node where the class is undeployed. +| EVT_CLASS_DEPLOY_FAILED | Class deployment failed. |The node where the class is to be deployed. 
+| EVT_TASK_DEPLOYED | A task class is deployed on a specific node. | The node where the class is deployed. +| EVT_TASK_UNDEPLOYED | A task class is undeployed on a specific node.|The node where the class is undeployed. +| EVT_TASK_DEPLOY_FAILED | Class deployment failed.|The node where the class is to be deployed. +|=== + +== Discovery Events + +Discovery events occur when nodes (both servers and clients) join or leave the cluster, including cases when nodes leave due to a failure. + +Discovery events are instances of the link:{events_url}/DiscoveryEvent.html[DiscoveryEvent] class. + +[cols="2,5,3",opts="header"] +|=== +| Event Type | Event Description | Where Event Is Fired +| EVT_NODE_JOINED | A node joins the cluster. | All nodes in the cluster (other than the one that joined). +| EVT_NODE_LEFT | A node leaves the cluster. |All remaining nodes in the cluster. +| EVT_NODE_FAILED | The cluster detects that a node left the cluster in a non-graceful way. | All remaining nodes in the cluster. +| EVT_NODE_SEGMENTED | This happens on a node that decides that it was segmented. | The node that is segmented. +| EVT_CLIENT_NODE_DISCONNECTED | A client node loses connection to the cluster. | The client node that disconnected from the cluster. +| EVT_CLIENT_NODE_RECONNECTED | A client node reconnects to the cluster.| The client node that reconnected to the cluster. +|=== + +== Task Execution Events + +Task execution events are associated with different stages of link:developers-guide/distributed-computing/map-reduce[task execution]. +They are also generated when you execute link:developers-guide/distributed-computing/distributed-computing[simple closures] because internally a closure is treated as a task that produces a single job. + +//// +This is what happens when you execute a task through the compute interface: + +. Task is deployed on all nodes (associated with the compute interface) +. Task is started (the map stage) +. Jobs are executed on the remote nodes +. 
The reduce stage +//// + +Task Execution events are instances of the link:{events_url}/TaskEvent.html[TaskEvent] class. + +[cols="2,5,3",opts="header"] +|=== +| Event Type | Event Description | Where Event Is Fired +| EVT_TASK_STARTED | A task is started. `IgniteCompute.execute()` or other method is called | The node that initiates the task. +| EVT_TASK_REDUCED | This event represents the 'reduce' stage of the task execution flow. | The node where the task was started. +| EVT_TASK_FINISHED | The execution of the task finishes. | The node where the task was started. +| EVT_TASK_FAILED | The task failed | The node where the task was started. +| EVT_TASK_TIMEDOUT | The execution of the task timed out. This can happen when `Ignite.compute().withTimeout(...)` to execute tasks. When a task times out, it cancels all jobs that are being executed. It also generates the `EVT_TASK_FAILED` event.| The node where the task was started. +| EVT_TASK_SESSION_ATTR_SET | A job sets an attribute in the link:developers-guide/distributed-computing/map-reduce#distributed-task-session[session]. | The node where the job is executed. +|=== + +{sp}+ + +Job Execution events are instances of the link:{events_url}/JobEvent.html[JobEvent] class. +The job execution events are generated at different stages of job execution and are associated with particular instances of the job. +The event contains information about the task that produced the job (task name, task class, etc.). + +[cols="2,5,3",opts="header"] +|=== +| Event Type | Event Description | Where Event Is Fired + +| EVT_JOB_MAPPED | A job is mapped to a specific node. Mapping happens on the node where the task is started. This event is generated for every job produced in the "map" stage. | The node that started the task. + +| EVT_JOB_QUEUED | The job is added to the queue on the node to which it was mapped. | The node where the job is scheduled for execution. 
+ +| EVT_JOB_STARTED | Execution of the job started.| The node where the job is executed. + +| EVT_JOB_FINISHED | Execution of the job finished. This also includes cases when the job is cancelled.| The node where the job is executed. + +| EVT_JOB_RESULTED | The job returned a result to the node from which it was sent. | The node where the task was started. + +| EVT_JOB_FAILED | Execution of a job fails. If the job failover strategy is configured (default), this event is accompanied by the `EVT_JOB_FAILED_OVER` event. | The node where the job is executed. + +| EVT_JOB_FAILED_OVER | The job was failed over to another node. | The node that started the task. + +| EVT_JOB_TIMEDOUT | The job timed out. | + +| EVT_JOB_REJECTED | The job is rejected. The job can be rejected if a link:developers-guide/distributed-computing/job-scheduling[collision spi] is configured. | The node where the job is rejected. + +| EVT_JOB_CANCELLED | The job was cancelled. | The node where the job is being executed. +|=== + + +== Cache Rebalancing Events + +Cache Rebalancing events (all except for `EVT_CACHE_REBALANCE_OBJECT_LOADED` and `EVT_CACHE_REBALANCE_OBJECT_UNLOADED`) are instances of the link:{events_url}/CacheRebalancingEvent.html[CacheRebalancingEvent] class. + +Rebalancing occurs on a per cache basis; therefore, each rebalancing event corresponds to a specific cache. +The event contains the name of the cache. + +The process of moving a single cache partition from Node A to Node B consists of the following steps: + +. Node A supplies a partition (REBALANCE_PART_SUPPLIED). The objects from the partition start to move to node B. +. Node B receives the partition data (REBALANCE_PART_LOADED). +. Node A removes the partition from its storage (REBALANCE_PART_UNLOADED). + +[cols="2,5,3",opts="header"] +|=== +| Event Type | Event Description | Where Event Is Fired +| EVT_CACHE_REBALANCE_STARTED | The rebalancing of a cache starts. | All nodes that host the cache. 
+| EVT_CACHE_REBALANCE_STOPPED | The rebalancing of a cache stops. | All nodes that host the cache. +| EVT_CACHE_REBALANCE_PART_LOADED | A cache's partition is loaded on the new node. This event is fired for every partition that participates in the cache rebalancing.| The node where the partition is loaded. +| EVT_CACHE_REBALANCE_PART_UNLOADED |A cache's partition is removed from the node after it has been loaded to its new destination. | The node where the partition was held before the rebalancing process started. +| EVT_CACHE_REBALANCE_OBJECT_LOADED | An object is moved to a new node as part of cache rebalancing. | The node where the object is loaded. +| EVT_CACHE_REBALANCE_OBJECT_UNLOADED | An object is removed from a node after it has been moved to a new node.| The node from which the object is removed. +| EVT_CACHE_REBALANCE_PART_DATA_LOST | A partition that is to be rebalanced is lost, for example, due to a node failure. | +| EVT_CACHE_REBALANCE_PART_SUPPLIED | A node supplies a cache partition as part of the rebalancing process. | The node that owns the partition. +//| EVT_CACHE_REBALANCE_PART_MISSED | *TODO*| +|=== + +== Transaction Events + +Transaction events are instances of the link:{events_url}/TransactionStateChangedEvent.html[TransactionStateChangedEvent] class. +They allow you to get notification about different stages of transaction execution. Each event contains the `Transaction` object this event is associated with. + + +[cols="2,5,3",opts="header"] +|=== +| Event Type | Event Description | Where Event Is Fired +| EVT_TX_STARTED | A transaction is started. Note that in transactional caches, each atomic operation executed outside a transaction is considered a transaction with a single operation. | The node where the transaction was started. +| EVT_TX_COMMITTED | A transaction is committed. | The node where the transaction was started. +| EVT_TX_ROLLED_BACK | A transaction is rolled back. |The node where the transaction was executed. 
+| EVT_TX_SUSPENDED | A transaction is suspended.|The node where the transaction was started. +| EVT_TX_RESUMED | A transaction is resumed. |The node where the transaction was started. +|=== + + +== Management Task Events + +Management task events represent the tasks that are executed by Visor or Web Console. +This event type can be used to monitor a link:administrators-guide/security/cluster-monitor-audit[Web Console activity]. + +[cols="2,5,3",opts="header"] +|=== +| Event Type | Event Description | Where Event Is Fired +| EVT_MANAGEMENT_TASK_STARTED | A task from Visor or Web Console starts. | The node where the task is executed. +|=== + + diff --git a/docs/_docs/developers-guide/events/listening-to-events.adoc b/docs/_docs/developers-guide/events/listening-to-events.adoc new file mode 100644 index 0000000..33dfa1a --- /dev/null +++ b/docs/_docs/developers-guide/events/listening-to-events.adoc @@ -0,0 +1,254 @@ += Working with Events +:javaFile: {javaCodeDir}/Events.java +:xmlFile: code-snippets/xml/events.xml + +== Overview +Ignite can generate events for a variety of operations happening in the cluster and notify your application about those operations. There are many types of events, including cache events, node discovery events, distributed task execution events, and many more. + +The list of events is available in the link:developers-guide/events/events[Events] section. + +== Enabling Events +By default, events are disabled, and you have to enable each event type explicitly if you want to use it in your application. 
+To enable specific event types, list them in the `includeEventTypes` property of `IgniteConfiguration` as shown below: + +[tabs] +-- +tab:XML[] +[source,xml] +---- +include::{xmlFile}[tags=**;!discovery, indent=0] +---- +tab:Java[] +[source,java] +---- +include::{javaFile}[tag=enabling-events,indent=0] +---- +tab:C#/.NET[] +[source,csharp] +---- +include::code-snippets/dotnet/WorkingWithEvents.cs[tag=enablingEvents,indent=0] +---- +tab:C++[unsupported] +-- + +== Getting the Events Interface + +The events functionality is available through the events interface, which provides methods for listening to cluster events. The events interface can be obtained from an instance of `Ignite` as follows: + + +[tabs] +-- +tab:Java[] +[source,java] +---- +include::{javaFile}[tag=get-events,indent=0] +---- +tab:C#/.NET[] +[source,csharp] +---- +include::code-snippets/dotnet/WorkingWithEvents.cs[tag=gettingEventsInterface1,indent=0] +---- +tab:C++[unsupported] +-- + +The events interface can be associated with a link:developers-guide/distributed-computing/cluster-groups[set of nodes]. This means that you can access events that happen on a given set of nodes. In the following example, the events interface is obtained for the set of nodes that host the data for the Person cache. + +[tabs] +-- +tab:Java[] +[source,java] +---- +include::{javaFile}[tag=get-events-for-cache,indent=0] +---- +tab:C#/.NET[] +[source,csharp] +---- +include::code-snippets/dotnet/WorkingWithEvents.cs[tag=gettingEventsInterface2,indent=0] +---- +tab:C++[unsupported] +-- + + +== Listening to Events + +You can listen to either local or remote events. Local events are events that are generated on the node where the listener is registered. Remote events are events that happen on other nodes. + +Note that some events may be fired on multiple nodes even if the corresponding real-world event happens only once. For example, when a node leaves the cluster, the `EVT_NODE_LEFT` event is generated on every remaining node. 
+ +Another example is when you put an object into a cache. In this case, the `EVT_CACHE_OBJECT_PUT` event occurs on the node that hosts the link:developers-guide/data-modeling/data-partitioning#backup-partitions[primary partition] into which the object is actually written, which may be different from the node where the `put(...)` method is called. In addition, the event is fired on all nodes that hold the link:developers-guide/data-modeling/data-partitioning#backup-partitions[backup partiti [...] + +The events interface provides methods for listening to local events only, and for listening to both local and remote events. + +=== Listening to Local Events + +To listen to local events, use the `localListen(listener, eventTypes...)` method, as shown below. The method accepts an event listener that is called every time an event of the given type occurs on the local node. + +To unregister the local listener, return `false` in its functional method. + +[tabs] +-- +tab:Java[] +[source,java] +---- +include::{javaFile}[tag=local,indent=0] +---- + +The event listener is an object of the `IgnitePredicate<T>` class with a type argument that matches the type of events the listener is going to process. +For example, cache events (`EVT_CACHE_OBJECT_PUT`, `EVT_CACHE_OBJECT_READ`, etc.) correspond to the link:{javadoc_base_url}/org/apache/ignite/events/CacheEvent.html[CacheEvent] class, discovery events (`EVT_NODE_LEFT`, `EVT_NODE_JOINED`, etc.) correspond to +the link:{javadoc_base_url}/org/apache/ignite/events/DiscoveryEvent.html[DiscoveryEvent,window=_blank] class, and so on. 
+If you want to listen to events of different types, you can use the generic link:{javadoc_base_url}/org/apache/ignite/events/Event.html[Event,window=_blank] interface: + +[source, java] +------------------------------------------------------------------------------- +IgnitePredicate<Event> localListener = evt -> { + // process the event + return true; +}; +------------------------------------------------------------------------------- +tab:C#/.NET[] +[source,csharp] +---- +include::code-snippets/dotnet/WorkingWithEvents.cs[tag=localListen,indent=0] +---- +tab:C++[unsupported] +-- + +=== Listening to Remote Events + +The `IgniteEvents.remoteListen(localListener, filter, types)` method can be used to register a listener that listens for both remote and local events. +It accepts a local listener, a filter, and a list of event types you want to listen to. + +The filter is deployed to all the nodes associated with the events interface, including the local node. The events that pass the filter are sent to the local listener. + +The method returns a unique identifier that can be used to unregister the listener and filters. To do this, call `IgniteEvents.stopRemoteListen(uuid)`. Another way to unregister the listener is to return `false` in the `apply()` method. + +[tabs] +-- +tab:Java[] +[source,java] +---- +include::{javaFile}[tag=remote,indent=0] +---- +tab:C#/.NET[unsupported] +tab:C++[unsupported] +-- + +//////////////////////////////////////////////////////////////////////////////// +TODO +The `IgniteEvents.remoteListen(...)` has an asynchronous counterpart that will register the given listener asynchronously. 
+ +++++ +<code-tabs> +<code-tab data-tab="Java"> +++++ +[source,java] +---- + +---- +++++ +</code-tab> +<code-tab data-tab="C#/.NET"> +++++ +[source,csharp] +---- + +---- +++++ +</code-tab> +</code-tabs> +++++ + +//////////////////////////////////////////////////////////////////////////////// + +=== Batching Events + +Each activity in a cache can result in an event notification being generated and sent. For systems with high cache activity, getting notified for every event could be network intensive, possibly leading to a decreased performance of cache operations. + +Event notifications can be grouped together and sent in batches or timely intervals to mitigate the impact on performance. Here is an example of how this can be done: + +[tabs] +-- +tab:Java[] +[source,java] +---- +include::{javaFile}[tag=batching,indent=0] +---- +tab:C#/.NET[unsupported] +tab:C++[unsupported] +-- + +== Storing and Querying Events + +You can configure an event storage that will keep events on the nodes where they occur. You can then query events in your application. + +The event storage can be configured to keep events for a specific period, keep only the most recent events, or keep the events that satisfy a specific filter. See the link:{javadoc_base_url}/org/apache/ignite/spi/eventstorage/memory/MemoryEventStorageSpi.html[MemoryEventStorageSpi,window=_blank] javadoc for details. 
+ +Below is an example of event storage configuration: + + +[tabs] +-- +tab:XML[] +[source,xml] +---- +<bean class="org.apache.ignite.configuration.IgniteConfiguration"> + + <property name="eventStorageSpi" > + <bean class="org.apache.ignite.spi.eventstorage.memory.MemoryEventStorageSpi"> + <property name="expireAgeMs" value="600000"/> + </bean> + </property> + +</bean> +---- +tab:Java[] +[source,java] +---- +include::{javaFile}[tag=event-storage,indent=0] +---- +tab:C#/.NET[] +[source,csharp] +---- +include::code-snippets/dotnet/WorkingWithEvents.cs[tag=storingEvents,indent=0] +---- +tab:C++[unsupported] +-- + +=== Querying Local Events + +The following example shows how you can query local `EVT_CACHE_OBJECT_PUT` events stored in the event storage. + +[tabs] +-- +tab:Java[] +[source,java] +---- +include::{javaFile}[tag=query-local-events,indent=0] +---- +tab:C#/.NET[] +[source,csharp] +---- +include::code-snippets/dotnet/WorkingWithEvents.cs[tag=queryLocal,indent=0] +---- +tab:C++[unsupported] +-- + + +=== Querying Remote Events +Here is an example of querying remote events: + +[tabs] +-- +tab:Java[] +[source,java] +---- +include::{javaFile}[tag=query-remote-events,indent=0] +---- +tab:C#/.NET[] +[source,csharp] +---- +include::code-snippets/dotnet/WorkingWithEvents.cs[tag=queryRemote,indent=0] +---- +tab:C++[unsupported] +-- + diff --git a/docs/_docs/developers-guide/memory-configuration/data-regions.adoc b/docs/_docs/developers-guide/memory-configuration/data-regions.adoc new file mode 100644 index 0000000..bc3d5a7 --- /dev/null +++ b/docs/_docs/developers-guide/memory-configuration/data-regions.adoc @@ -0,0 +1,70 @@ += Configuring Data Regions + +== Overview +Ignite uses the concept of _data regions_ to control the amount of RAM available to a cache or a group of caches. A data region is a logical extendable area in RAM in which cached data resides. You can control the initial size of the region and the maximum size it can occupy. 
In addition to the size, data regions control link:developers-guide/persistence/native-persistence[persistence settings] for caches. + +By default, there is one data region that can take up to 20% of RAM available to the node, and all caches you create are placed in that region; but you can add as many regions as you want. There are a couple of reasons why you may want to have multiple regions: + +* Regions allow you to configure the amount of RAM available to a cache or number of caches. +* Persistence parameters are configured per region. If you want to have both in-memory only caches and the caches that store their content to disk, you need to configure two (or more) data regions with different persistence settings: one for in-memory caches and one for persistent caches. +* Some memory parameters, such as link:developers-guide/memory-configuration/eviction-policies[eviction policies], are configured per data region. + +See the following section to learn how to change the parameters of the default data region or configure multiple data regions. + +== Configuring Default Data Region + +By default, a new cache is added to the default data region. If you want to change the properties of the default data region, you can do so in the data storage configuration. + + +:xmlFile: code-snippets/xml/data-regions-configuration.xml +:javaFile: {javaCodeDir}/DataRegionConfigurationExample.java + +[tabs] +-- +tab:XML[] + +[source,xml] +---- +include::{xmlFile}[tags=!*;ignite-config;default;!discovery,indent=0] +---- +tab:Java[] +[source,java] +---- +include::{javaFile}[tags=!*;ignite-config;default,indent=0] +---- +tab:C#/.NET[] +[source,csharp] +---- +include::code-snippets/dotnet/MemoryArchitecture.cs[tag=DefaultDataReqion,indent=0] +---- +tab:C++[unsupported] +-- + +== Adding Custom Data Regions + +In addition to the default data region, you can add more data regions with custom settings. 
+In the following example, we configure a data region that can take up to 40 MB and uses the link:developers-guide/memory-configuration/eviction-policies#random-2-lru[Random-2-LRU] eviction policy. +Note that further below in the configuration, we create a cache that resides in the new data region. + +[tabs] +-- +tab:XML[] +[source,xml] +---- +include::{xmlFile}[tags=!*;ignite-config;data-region;default;caches;!discovery,indent=0] +---- + +For the full list of properties, refer to the link:{javadoc_base_url}/org/apache/ignite/configuration/DataStorageConfiguration.html[DataStorageConfiguration] javadoc. +tab:Java[] +[source,java] +---- +include::{javaFile}[tags=ignite-config,indent=0] +---- +tab:C#/.NET[] +[source,csharp] +---- +include::code-snippets/dotnet/MemoryArchitecture.cs[tag=mem,indent=0] +---- +tab:C++[unsupported] +-- + diff --git a/docs/_docs/developers-guide/memory-configuration/eviction-policies.adoc b/docs/_docs/developers-guide/memory-configuration/eviction-policies.adoc new file mode 100644 index 0000000..b60a0d1 --- /dev/null +++ b/docs/_docs/developers-guide/memory-configuration/eviction-policies.adoc @@ -0,0 +1,163 @@ += Eviction Policies + +When link:developers-guide/persistence/native-persistence[Native Persistence] is off, Ignite holds all cache entries in the off-heap memory and allocates pages as new data comes in. +When a memory limit is reached and Ignite cannot allocate a page, some of the data must be purged from memory to avoid OutOfMemory errors. +This process is called _eviction_. Eviction prevents the system from running out of memory but at the cost of losing data and having to reload it when you need it again. 
Eviction is used in the following cases:
+ +By default, eviction starts when the overall RAM consumption by a region gets to 90%. +Use the `DataRegionConfiguration.setEvictionThreshold(...)` parameter if you need to initiate eviction earlier or later. + +Ignite supports two page selection algorithms: + +* Random-LRU +* Random-2-LRU + +The differences between the two are explained below. + +=== Random-LRU + +To enable the Random-LRU eviction algorithm, configure the data region as shown below: + +[tabs] +-- +tab:XML[] +[source,xml] +---- + +include::code-snippets/xml/eviction.xml[tags=ignite-config;!discovery, indent=0] + +<bean class="org.apache.ignite.configuration.IgniteConfiguration"> + <!-- Memory configuration. --> + <property name="dataStorageConfiguration"> + <bean class="org.apache.ignite.configuration.DataStorageConfiguration"> + <property name="dataRegionConfigurations"> + <list> + <!-- + Defining a data region that consumes up to 20 GB of RAM. + --> + <bean class="org.apache.ignite.configuration.DataRegionConfiguration"> + <!-- Custom region name. --> + <property name="name" value="20GB_Region"/> + + <!-- 500 MB initial size (RAM). --> + <property name="initialSize" value="#{500L * 1024 * 1024}"/> + + <!-- 20 GB maximum size (RAM). --> + <property name="maxSize" value="#{20L * 1024 * 1024 * 1024}"/> + + <!-- Enabling RANDOM_LRU eviction for this region. --> + <property name="pageEvictionMode" value="RANDOM_LRU"/> + </bean> + </list> + </property> + </bean> + </property> + + <!-- The rest of the configuration. --> +</bean> +---- +tab:Java[] +[source,java] +---- +include::{javaCodeDir}/EvictionPolicies.java[tag=randomLRU,indent=0] +---- +tab:C#/.NET[] +[source,csharp] +---- +include::code-snippets/dotnet/EvictionPolicies.cs[tag=randomLRU,indent=0] +---- +tab:C++[unsupported] +-- + +Random-LRU algorithm works as follows: + +* Once a memory region defined by a memory policy is configured, an off-heap array is allocated to track the 'last usage' timestamp for every individual data page. 
+* When a data page is accessed, its timestamp gets updated in the tracking array. +* When it is time to evict a page, the algorithm randomly chooses 5 indexes from the tracking array and evicts the page with the oldest timestamp. If some of the indexes point to non-data pages (index or system pages), then the algorithm picks another page. + +=== Random-2-LRU + +To enable Random-2-LRU eviction algorithm, which is a scan-resistant version of Random-LRU, configure the data region, as shown in the example below: + +[tabs] +-- +tab:XML[] +[source,xml] +---- +<bean class="org.apache.ignite.configuration.IgniteConfiguration"> + <!-- Memory configuration. --> + <property name="dataStorageConfiguration"> + <bean class="org.apache.ignite.configuration.DataStorageConfiguration"> + <property name="dataRegionConfigurations"> + <list> + <!-- + Defining a data region that consumes up to 20 GB of RAM. + --> + <bean class="org.apache.ignite.configuration.DataRegionConfiguration"> + <!-- Custom region name. --> + <property name="name" value="20GB_Region"/> + + <!-- 500 MB initial size (RAM). --> + <property name="initialSize" value="#{500L * 1024 * 1024}"/> + + <!-- 20 GB maximum size (RAM). --> + <property name="maxSize" value="#{20L * 1024 * 1024 * 1024}"/> + + <!-- Enabling RANDOM_2_LRU eviction for this region. --> + <property name="pageEvictionMode" value="RANDOM_2_LRU"/> + </bean> + </list> + </property> + </bean> + </property> + + <!-- The rest of the configuration. --> +</bean> +---- +tab:Java[] +[source,java] +---- +include::{javaCodeDir}/EvictionPolicies.java[tag=random2LRU,indent=0] +---- +tab:C#/.NET[] +[source,csharp] +---- +include::code-snippets/dotnet/EvictionPolicies.cs[tag=random2LRU,indent=0] +---- +tab:C++[unsupported] +-- + +In Random-2-LRU, the two most recent access timestamps are stored for every data page. 
At the time of eviction, the algorithm randomly chooses 5 indexes from the tracking array and the minimum between two latest timestamps is taken for further comparison with corresponding minimums of four other pages that are chosen as eviction candidates. + +Random-LRU-2 outperforms LRU by resolving the "one-hit wonder" problem: if a data page is accessed rarely but accidentally accessed once, it's protected from eviction for a long time. + +== On-Heap Cache Eviction + +Refer to the link:developers-guide/configuring-caches/on-heap-caching#configuring-eviction-policy[Configuring Eviction Policy for On-Heap Caches] section for the instruction on how to configure eviction policy for on-heap caches. diff --git a/docs/_docs/developers-guide/memory-configuration/index.adoc b/docs/_docs/developers-guide/memory-configuration/index.adoc new file mode 100644 index 0000000..d1e43cf --- /dev/null +++ b/docs/_docs/developers-guide/memory-configuration/index.adoc @@ -0,0 +1,5 @@ +--- +layout: toc +--- += Memory Configuration + diff --git a/docs/_docs/developers-guide/restapi.adoc b/docs/_docs/developers-guide/restapi.adoc new file mode 100644 index 0000000..e583625 --- /dev/null +++ b/docs/_docs/developers-guide/restapi.adoc @@ -0,0 +1,2820 @@ += REST API +:request_table_props: cols="15%,10%,10%,45%,20%",options="header" +:response_table_props: cols="15%,15%,50%,20%",options="header" + +Ignite provides an HTTP REST client that gives you the ability to communicate with the grid over HTTP and HTTPS protocols using the REST approach. REST APIs can be used to perform different operations like read/write from/to cache, execute tasks, get various metrics, and more. + +Internally, Ignite uses Jetty to provide HTTP server features. See <<Configuration>> section below for details on how to configure jetty. + +== Getting Started + +To enable HTTP connectivity, make sure that the `ignite-rest-http` module is enabled. 
+If you use the binary distribution, copy the `ignite-rest-http` module from `IGNITE_HOME/libs/optional/` to the `IGNITE_HOME/libs` folder. +See link:developers-guide/setup#enabling-modules[Enabling modules] for details. + +Explicit configuration is not required; the connector starts up automatically and listens on port `8080`. You can check if it works with curl: + +[source,shell] +---- +curl 'http://localhost:8080/ignite?cmd=version' +---- + +Request parameters may be provided as either a part of URL or in a form data: + +[source,shell] +---- +curl 'http://localhost:8080/ignite?cmd=put&cacheName=myCache' -X POST -H 'Content-Type: application/x-www-form-urlencoded' -d 'key=testKey&val=testValue' +---- + +=== Configuration + +You can change HTTP server parameters as follows: + +[tabs] +-- +tab:XML[] + +[source, xml] +---- +include::code-snippets/xml/http-configuration.xml[tags=ignite-config;http-configuration;!discovery, indent=0] +---- +tab:Java[] + +[source, java] +---- +include::{javaCodeDir}/RESTConfiguration.java[tags=http-configuration, indent=0] +---- + +tab:C#/.NET[] + +tab:C++[unsupported] +-- + +The following table describes the properties of `ConnectorConfiguration` that are related to the http server: + +[width="100%", cols="30%,50%,10%,10%"] +|======= +| Parameter Name | Description |Optional |Default Value + +|`setSecretKey(String)` +|Defines secret key used for client authentication. When provided, client request must contain `HTTP header X-Signature` with the string "[1]:[2]", where [1] is timestamp in milliseconds and [2] is the Base64 encoded SHA1 hash of the secret key. +|Yes +|`null` + +|`setPortRange(int)` +|Port range for Jetty server. If the port provided in Jetty configuration or `IGNITE_JETTY_PORT` system property is already in use, Ignite iteratively increments port by 1 and tries to bind once again until provided port range is exceeded. +|Yes +|`100` + +|`setJettyPath(String)` +|Path to Jetty configuration file. 
Should be either absolute or relative to `IGNITE_HOME`. If the path is not set, Ignite starts a Jetty server with a simple HTTP connector. This connector uses `IGNITE_JETTY_HOST` and `IGNITE_JETTY_PORT` system properties as `host` and `port` respectively. If `IGNITE_JETTY_HOST` is not provided, `localhost` is used as default. If `IGNITE_JETTY_PORT` is not provided, port `8080` is used. +|Yes +|`null` + +|`setMessageInterceptor(ConnectorMessageInterceptor)` +|The interceptor provides the ability to transform all objects exchanged via REST protocol. For example, if you use custom serialisation on client you can write an interceptor to transform binary representations received from the client to Java objects and later access them from Java code directly. +|Yes +|`null` +|======= + +==== Example Jetty XML Configuration + +Path to this configuration should be set to `ConnectorConfiguration.setJettyPath(String)` as explained above. + +[source,xml] +---- +include::code-snippets/xml/jetty.xml[tags=, indent=0] +---- + +=== Security + +//NOTE: Refer to the link:https://www.gridgain.com/docs/tutorials/security/ssl-guide[SSL Guide] for a comprehensive instruction on SSL. + +When link:administrators-guide/security/authentication[authentication] is configured in the cluster, all applications that use REST API request authentication by providing security credentials. +The authentication request returns a session token that can be used with any command within that session. + +There are two ways to request authorization: + +. Use the authenticate command with `ignite.login=[user]&ignite.password=[password]` parameters. ++ +-- +---- +https://[host]:[port]/ignite?cmd=authenticate&ignite.login=[user]&ignite.password=[password] +---- +-- +. Use any REST command with `ignite.login=[user]&ignite.password=[password]` parameters in the path of your connection string. 
In our example below, we use the `version` command: ++ +-- +[source, curl] +---- +http://[host]:[port]/ignite?cmd=version&ignite.login=[user]&ignite.password=[password] +---- +-- +In both examples above, replace `[host]`, `[port]`, `[user]`, and `[password]` with actual values. + +Executing any one of the above strings in a browser returns a response with a session token which looks like this: + +---- +{"successStatus":0,"error":null,"sessionToken":"EF6013FF590348CE91DEAE9870183BEF","response":true} +---- + +Once you obtain the session token, use the sessionToken parameter with your connection string as shown in the example below: + +---- +http://[host]:[port]/ignite?cmd=top&sessionToken=[sessionToken] +---- + +In the above connection string, replace `[host]`, `[port]`, and `[sessionToken]` with actual values. + +[WARNING] +==== +Either user credentials or a session token is required when authentication is enabled on the server. +Failure to provide either a `sessionToken` or `user` & `password` parameters in the REST connection string results in an error: + +[source, json] +---- +{ + "successStatus":2, + "sessionToken":null, + "error":"Failed to handle request - session token not found or invalid", + "response":null +} +---- +==== + + +[NOTE] +==== +[discrete] +=== Session Token Expiration + +A session token is valid only for 30 seconds. Using an expired session token results in an error, like the one below: + +[source, json] +---- +{ + "successStatus":1, + "error":"Failed to handle request - unknown session token (maybe expired session) [sesTok=12FFFD4827D149068E9FFF59700E5FDA]", + "sessionToken":null, + "response":null +} +---- + +To set a custom expire time, set the system variable: `IGNITE_REST_SESSION_TIMEOUT` (in seconds). + +[source, text] +---- +-DIGNITE_REST_SESSION_TIMEOUT=3600 +---- + + +==== + +== Data Types +The REST API also provides support for Java built-in types for put/get operations via `keyType` and `valueType` optional parameters. 
Note that unless one of the below-mentioned types is explicitly specified
structure for each command: + +[{response_table_props}] +|======= +|Field +|Type +|Description +|Example + +|`affinityNodeId` +|`string` +|Affinity node ID. +|`2bd7b049-3fa0-4c44-9a6d-b5c7a597ce37` + +|`error` +|`string` +|This field contains description of error if server could not handle the request. +|Specifically for each command. + +|`sessionToken` +|`string` +|When authentication is enabled on the server, this field contains a session token that can be used with any command within that session. If authentication is off, this field contains `null`. +When authentication is enabled - `EF6013FF590348CE91DEAE9870183BEF` +|Otherwise, `null`. + +|`response` +|`jsonObject` +|This field contains the result of the command. +|Specifically for each command. + +|`successStatus` +|`integer` +|Exit status code. It might have the following values: + +`success = 0` + +`failed = 1` + +`authorization failed = 2` + +`security check failed = 3` +|`0` +|======= + +== REST API Reference + +=== Version + +Returns the Ignite version. + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=version +---- + +*Response:*:: ++ +[source,json] +---- +{ + "error": "", + "response": "1.0.0", + "successStatus": 0 +} +---- + +=== Activate + +Activates the cluster. + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=activate +---- + +*Response:*:: ++ +[source,json] +---- +{ + "successStatus":0, + "error":null, + "sessionToken":null, + "response":"activate started" +} +---- + + +=== Deactivate +Starts the deactivation process for a persistence-enabled cluster. + +include::includes/note-on-deactivation.adoc[] + + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=deactivate +---- + +*Response:*:: ++ +[source,json] +---- +{ + "successStatus":0, + "error":null, + "sessionToken":null, + "response":"deactivate started" +} +---- + + +=== Current State +Returns the current state (active/inactive) of the cluster. 
+ +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=currentstate +---- + +*Response:*:: ++ +Returns `true` if the cluster is active. Returns `false` if the cluster in inactive. ++ +[source,json] +---- +{ + "successStatus":0, + "error":null, + "sessionToken":null, + "response":true +} +---- + +=== Increment + +Adds and gets current value of given atomic long. + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=incr&cacheName={cacheName}&key={incrKey}&init={initialValue}&delta={delta} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + + +|`cacheName` +| string +| Yes +| Cache name. If not provided, default cache is used. +| partitionedCache + +|`key` +| string +| +| The name of atomic long. +| counter + +|`init` +|long +| Yes +| Initial value. +| 15 + +|`delta` +| long +| +|Number to be added. +| 42 +|======= + +*Response:*:: ++ +The response contains the value after the operation. ++ +[source,json] +---- +{ + "affinityNodeId": "e05839d5-6648-43e7-a23b-78d7db9390d5", + "error": "", + "response": 42, + "successStatus": 0 +} +---- + +=== Decrement + +Subtracts and gets current value of given atomic long. + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=decr&cacheName={cacheName}&key={key}&init={init_value}&delta={delta} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + +|`cacheName` +| string +|Yes +|Cache name. If not provided, the default cache ("default") is used. +|partitionedCache + +|`key` +|string +| +|The name of atomic long. +|counter + +|`init` +| long +| Yes +| Initial value. +| `15` + +|`delta` +|long +| +|Number to be subtracted. +|`42` +|======= + +*Response:*:: ++ +The response contains the value after the operation. 
++ +[source,json] +---- +{ + "affinityNodeId": "e05839d5-6648-43e7-a23b-78d7db9390d5", + "error": "", + "response": -42, + "successStatus": 0 +} +---- + + + +=== Cache Metrics + +Shows metrics for a cache. + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=cache&cacheName={cacheName}&destId={nodeId} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + +|`cacheName` +| string +| Yes +| Cache name. If not provided, the default cache is used. +| partitionedCache + +|`destId` +| string +| Yes +| Node ID for which the metrics are to be returned. +| 8daab5ea-af83-4d91-99b6-77ed2ca06647 +|======= + +*Response:*:: ++ +[source,json] +---- +{ + "affinityNodeId": "", + "error": "", + "response": { + "hits": 0, + "misses": 0, + "reads": 0, + "writes": 2 + }, + "successStatus": 0 +} +---- ++ +[{response_table_props}] +|======= +|Field +|Type +|Description +|Example + +|`response` +| jsonObject +| The JSON object contains cache metrics such as create time, count reads and etc. +a| +`{ + "createTime": 1415179251551, "hits": 0, "misses": 0, "readTime":1415179251551, "reads": 0,"writeTime": 1415179252198, "writes": 2 +}` +|======= + +=== Cache Size +Gets the number of all entries cached across all nodes. + + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=size&cacheName={cacheName} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + + +|`cacheName` +| string +| Yes +| Cache name. If not provided, the default cache is used. +| partitionedCache +|======= + +*Response:*:: ++ +[source,json] +---- +{ + "affinityNodeId": "", + "error": "", + "response": 1, + "successStatus": 0 +} +---- ++ +[{response_table_props}] +|======= +|Field +|Type +|Description +|Example + +|`response` +| number +| Number of all entries cached across all nodes. +| 5 +|======= + +=== Cache Metadata +Gets metadata for a cache. 
+ +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=metadata&cacheName={cacheName} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + + +|`cacheName` +| String +| Yes +| Cache name. If not provided, metadata for all user caches is returned. +| partitionedCache +|======= + +*Response:*:: ++ +[source,json] +---- +{ + "error": "", + "response": { + "cacheName": "partitionedCache", + "types": [ + "Person" + ], + "keyClasses": { + "Person": "java.lang.Integer" + }, + "valClasses": { + "Person": "org.apache.ignite.Person" + }, + "fields": { + "Person": { + "_KEY": "java.lang.Integer", + "_VAL": "org.apache.ignite.Person", + "ID": "java.lang.Integer", + "FIRSTNAME": "java.lang.String", + "LASTNAME": "java.lang.String", + "SALARY": "double" + } + }, + "indexes": { + "Person": [ + { + "name": "ID_IDX", + "fields": [ + "id" + ], + "descendings": [], + "unique": false + }, + { + "name": "SALARY_IDX", + "fields": [ + "salary" + ], + "descendings": [], + "unique": false + } + ] + } + }, + "sessionToken": "", + "successStatus": 0 +} +---- + + +=== Compare-And-Swap + +Stores a given key-value pair in a cache only if the previous value is equal to the expected value passed in. + + +*Request:*:: ++ +[source,shell] +---- +https://[host]:[port]/ignite?cmd=authenticate&ignite.login=[user]&ignite.password=[password] +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + +|`cacheName` +| string +| Yes +| Cache name. If not provided, the default cache is used. +| partitionedCache + +|`key` +| string +| +|Key to store in cache. +| name + +|`val` +| string +| +| Value associated with the given key. +| Jack + +|`val2` +| string +| +| Expected value. +| Bob + +|`destId` +| string +| Yes +| Node ID for which the metrics are to be returned. +| 8daab5ea-af83-4d91-99b6-77ed2ca06647 +|======= + +*Response:*:: ++ +The response returns `true` if the value was replaced, `false` otherwise. 
++ +[source,json] +---- +{ + "affinityNodeId": "1bcbac4b-3517-43ee-98d0-874b103ecf30", + "error": "", + "response": true, + "successStatus": 0 +} +---- + +=== Append + +Appends a line for value which is associated with key. + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=append&key={appendKey}&val={_suffix}&cacheName={cacheName}&destId={nodeId} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + + +|`cacheName` +| string +| Yes +| Cache name. If not provided, the default cache is used. +| partitionedCache + +| `key` +| string +| +| Key to store in cache. +| name + +|`val` +|string +| +| Value to be appended to the current value. +| Jack + +|`destId` +| string +| Yes +| Node ID for which the metrics are to be returned. +| 8daab5ea-af83-4d91-99b6-77ed2ca06647 +|======= + +*Response:*:: ++ +[source,json] +---- +{ + "affinityNodeId": "1bcbac4b-3517-43ee-98d0-874b103ecf30", + "error": "", + "response": true, + "successStatus": 0 +} +---- ++ +[{response_table_props}] +|======= +|Field +|Type +|Description +|Example + +|`response` +| boolean +| `true` if replace happened, `false` otherwise. +| true +|======= + + +=== Prepend + +Adds prefix to the value that is associated with a given key. + +*Request:* :: ++ +[source,shell] +---- +http://host:port/ignite?cmd=prepend&key={key}&val={value}&cacheName={cacheName}&destId={nodeId} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + +|`cacheName` +| string +| Yes +| Cache name. If not provided, the default cache is used. +| myCache + +|`key` +|string +| +|Key to store in cache. +|name + +|`val` +|string +| +| The string to be prepended to the current value. +|Name_ + +|`destId` +|string +| Yes +| Node ID for which the metrics are to be returned. 
+| 8daab5ea-af83-4d91-99b6-77ed2ca06647 +|======= + +*Response:*:: ++ +[source,json] +---- +{ + "affinityNodeId": "1bcbac4b-3517-43ee-98d0-874b103ecf30", + "error": "", + "response": true, + "successStatus": 0 +} +---- ++ +[{response_table_props}] +|======= +|Field +|Type +|Description +|Example + +|`response` +|boolean +| `true` if replace happened, `false` otherwise. +| true +|======= + + +=== Replace + +Stores a given key-value pair in a cache if the cache already contains the key. + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=rep&key=repKey&val=newValue&cacheName={cacheName}&destId={nodeId} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + + +|`cacheName` +| string +| Yes +| Cache name. If not provided, the default cache is used. +| partitionedCache + +|`key` +| string +| +| Key to store in cache. +| name + +|`val` +| string +| +| Value associated with the given key. +| Jack + +|`destId` +| string +| Yes +| Node ID for which the metrics are to be returned. +| 8daab5ea-af83-4d91-99b6-77ed2ca06647 + +| `exp` +| long +| Yes +| Expiration time in milliseconds for the entry. When the parameter is set, the operation is executed with link:developers-guide/configuring-caches/expiry-policies[ModifiedExpiryPolicy]. +| 60000 + +|======= + +*Response:*:: ++ +The response contains `true` if the value was replaced, `false` otherwise. ++ +[source,json] +---- +{ + "affinityNodeId": "1bcbac4b-3517-43ee-98d0-874b103ecf30", + "error": "", + "response": true, + "successStatus": 0 +} +---- + +=== Get +Retrieves the value mapped to a specified key from a cache. + + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=get&key={getKey}&cacheName={cacheName}&destId={nodeId} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + +|`cacheName` +| string +| Yes +| Cache name. If not provided, the default cache is used. 
+| partitionedCache + +|`key` +| string +| +| Key whose associated value is to be returned. +| testKey + +|`keyType` +| Java built-in type +| Yes +| See <<Data Types>> for more details. +| + +|`destId` +| string +| Yes +| Node ID for which the metrics are to be returned. +| 8daab5ea-af83-4d91-99b6-77ed2ca06647 +|======= + + +=== Get All +Retrieves values mapped to the specified keys from a given cache. + + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=getall&k1={getKey1}&k2={getKey2}&k3={getKey3}&cacheName={cacheName}&destId={nodeId} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + +|`cacheName` +| string +| Yes +| Cache name. If not provided, the default cache is used. +| partitionedCache + +|`k1...kN` +| string +| +| Keys whose associated values are to be returned. +| key1, key2, ..., keyN + +|`destId` +| string +| Yes +| Node ID for which the metrics are to be returned. +| 8daab5ea-af83-4d91-99b6-77ed2ca06647 +|======= + +*Response:*:: ++ +[source,json] +---- +{ + "affinityNodeId": "", + "error": "", + "response": { + "key1": "value1", + "key2": "value2" + }, + "successStatus": 0 +} +---- ++ +[NOTE] +==== +[discrete] +=== Get output as array + +To obtain the output as an array, use the `IGNITE_REST_GETALL_AS_ARRAY=true` system property. +Once the property is set, the `getall` command provides the response in the following format: + +`{“successStatus”:0,“affinityNodeId”:null,“error”:null,“sessionToken”:null,“response”:[{“key”:“key1”,“value”:“value1”},{“key”:“key2”,“value”:“value2”}]}` +==== + + +=== Get and Remove +Removes the given key mapping from cache and returns the previous value. + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=getrmv&cacheName={cacheName}&destId={nodeId}&key={key} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + +|`cacheName` +| string +| Yes +| Cache name. 
If not provided, the default cache is used. +| partitionedCache + +|`key` +|string +| +| Key whose mapping is to be removed from the cache. +| name + +|`destId` +| string +| Yes +| Node ID for which the metrics are to be returned. +| 8daab5ea-af83-4d91-99b6-77ed2ca06647 +|======= + +*Response:*:: ++ +[source,json] +---- +{ + "affinityNodeId": "1bcbac4b-3517-43ee-98d0-874b103ecf30", + "error": "", + "response": value, + "successStatus": 0 +} +---- ++ +[{response_table_props}] +|======= +|Field +|Type +|Description +|Example + +|`response` +| jsonObject +| Value for the key. +| `{"name": "bob"}` +|======= + +=== Get and Put +Stores a given key-value pair in a cache and returns the existing value if there is one. + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=getput&key=getKey&val=newVal&cacheName={cacheName} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + +|`cacheName` +| string +| Yes +| Cache name. If not provided, the default cache is used. +| partitionedCache + +|`key` +| string +| +| Key to be associated with value. +| name + +|`val` +| string +| +| Value to be associated with key. +| Jack + +|`destId` +| string +| Yes +| Node ID for which the metrics are to be returned. +| 8daab5ea-af83-4d91-99b6-77ed2ca06647 + +|======= + +*Response:*:: ++ +The response contains the previous value for the key. ++ +[source,json] +---- +{ + "affinityNodeId": "2bd7b049-3fa0-4c44-9a6d-b5c7a597ce37", + "error": "", + "response": {"name": "bob"}, + "successStatus": 0 +} +---- + + + +=== Get and Put If Absent +Stores given key-value pair in cache only if cache had no previous mapping for it. If cache previously contained value for the given key, then this value is returned. 
+ + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=getputifabs&key=getKey&val=newVal&cacheName={cacheName} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + + +|`cacheName` +| string +| Yes +| Cache name. If not provided, the default cache is used. +| partitionedCache + +|`key` +| string +| +| Key to be associated with value. +| name + +|`val` +| string +| +| Value to be associated with key. +| Jack + +|`destId` +| string +| Yes +| Node ID for which the metrics are to be returned. +| 8daab5ea-af83-4d91-99b6-77ed2ca06647 +|======= + +*Response:*:: ++ +[source,json] +---- +{ + "affinityNodeId": "2bd7b049-3fa0-4c44-9a6d-b5c7a597ce37", + "error": "", + "response": "value", + "successStatus": 0 +} +---- ++ +[{response_table_props}] +|======= +|Field +|Type +|Description +|Example + +|`response` +| jsonObject +| Previous value for the given key. +|`{"name": "bob"}` +|======= + + + +=== Get and Replace + +Stores a given key-value pair in cache only if there is a previous mapping for it. + +*Request:* :: ++ +[source,shell] +---- +http://host:port/ignite?cmd=getrep&key={key}&val={val}&cacheName={cacheName}&destId={nodeId} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + +|`cacheName` +| string +| Yes +| Cache name. If not provided, the default cache is used. +| partitionedCache + +|`key` +| string +| +| Key to store in cache. +| name + +|`val` +| string +| +| Value associated with the given key. +| Jack + +|`destId` +| string +| Yes +| Node ID for which the metrics are to be returned. +| 8daab5ea-af83-4d91-99b6-77ed2ca06647 +|======= + +*Response:*:: ++ +The response contains the previous value associated with the specified key. 
++ +[source,json] +---- +{ + "affinityNodeId": "1bcbac4b-3517-43ee-98d0-874b103ecf30", + "error": "", + "response": oldValue, + "successStatus": 0 +} +---- ++ +[{response_table_props}] +|======= +|Field +|Type +|Description +|Example + +|`response` +|jsonObject +| The previous value associated with the specified key. +| `{"name": "Bob"}` +|======= + +=== Replace Value + +Replaces the entry for a key only if currently mapped to a given value. + + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=repval&key={key}&val={newValue}&val2={oldVal}&cacheName={cacheName}&destId={nodeId} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + +|`cacheName` +| string +| Yes +| Cache name. If not provided, the default cache is used. +| partitionedCache + +|`key` +| string +| +| Key to store in cache. +| name + +|`val` +| string +| +| Value associated with the given key. +| Jack + +|`val2` +| string +| +|Value expected to be associated with the specified key. +|oldValue + +|`destId` +| string +| Yes +| Node ID for which the metrics are to be returned. +| 8daab5ea-af83-4d91-99b6-77ed2ca06647 +|======= + + +*Response:*:: ++ +[source,json] +---- +{ + "affinityNodeId": "1bcbac4b-3517-43ee-98d0-874b103ecf30", + "error": "", + "response": true, + "successStatus": 0 +} +---- ++ +[{response_table_props}] +|======= +|Field +|Type +|Description +|Example + +|`response` +| boolean +| `true` if replace happened, `false` otherwise. +|true +|======= + +=== Remove + +Removes the given key mapping from cache. + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=rmv&key={rmvKey}&cacheName={cacheName}&destId={nodeId} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + + +|`cacheName` +| string +| Yes +| Cache name. If not provided, the default cache is used. +| partitionedCache + +|`key` +| string +| +| Key - for which the mapping is to be removed from cache. 
+| name + +|`destId` +| string +| Yes +| Node ID for which the metrics are to be returned. +| 8daab5ea-af83-4d91-99b6-77ed2ca06647 +|======= + +*Response:*:: ++ +[source,json] +---- +{ + "affinityNodeId": "1bcbac4b-3517-43ee-98d0-874b103ecf30", + "error": "", + "response": true, + "successStatus": 0 +} +---- ++ +[{response_table_props}] +|======= +|Field +|Type +|Description +|Example + +|`response` + | boolean + | `true` if replace happened, `false` otherwise. + |true +|======= + + + +=== Remove All + +Removes given key mappings from a cache. + + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=rmvall&k1={rmKey1}&k2={rmKey2}&k3={rmKey3}&cacheName={cacheName}&destId={nodeId} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + + +| `cacheName` +| string +| Yes +| Cache name. If not provided, the default cache is used. +| partitionedCache + +| `k1...kN` +|string +| +|Keys whose mappings are to be removed from the cache. +|name + +|`destId` +| string +| Yes +| Node ID for which the metrics are to be returned. +| 8daab5ea-af83-4d91-99b6-77ed2ca06647 +|======= + +*Response:*:: ++ +[source,json] +---- +{ + "affinityNodeId": "1bcbac4b-3517-43ee-98d0-874b103ecf30", + "error": "", + "response": true, + "successStatus": 0 +} +---- ++ +[{response_table_props}] +|======= +|Field +|Type +|Description +|Example + +|`response` +| boolean +| `true` if replace happened, `false` otherwise. +|true +|======= + +=== Remove Value + +Removes the mapping for a key only if currently mapped to the given value. + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=rmvval&key={rmvKey}&val={rmvVal}&cacheName={cacheName}&destId={nodeId} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + + +| `cacheName` +| string +| Yes +| Cache name. If not provided, the default cache is used. 
+| partitionedCache + +| `key` +| string +| +| Key whose mapping is to be removed from the cache. +| name + +|`val` +| string +| +| Value expected to be associated with the specified key. +| oldValue + +|`destId` +| string +| Yes +| Node ID for which the metrics are to be returned. +| 8daab5ea-af83-4d91-99b6-77ed2ca06647 +|======= + +*Response:*:: ++ +[source,json] +---- +{ + "affinityNodeId": "1bcbac4b-3517-43ee-98d0-874b103ecf30", + "error": "", + "response": true, + "successStatus": 0 +} +---- ++ +[{response_table_props}] +|======= +|Field +|Type +|Description +|Example + +|`response` +| boolean +| `false` if there was no matching key. +|true +|======= + + +=== Add + +Stores a given key-value pair in a cache if the cache does not contain the key. + + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=add&key=newKey&val=newValue&cacheName={cacheName}&destId={nodeId} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + + +|`cacheName` +| string +| Yes +| Cache name. If not provided, the default cache is used. +| partitionedCache + +|`key` +| string +| +| Key to be associated with the value. +| name + +|`val` +| string +| +| Value to be associated with the key. +| Jack + +|`destId` +|string +| Yes +| Node ID for which the metrics are to be returned. +| 8daab5ea-af83-4d91-99b6-77ed2ca06647 + +|`exp` +| long +| Yes +| Expiration time in milliseconds for the entry. When the parameter is set, the operation is executed with link:developers-guide/configuring-caches/expiry-policies[ModifiedExpiryPolicy]. +| 60000 + +|======= + +*Response:*:: ++ +[source,json] +---- +{ + "affinityNodeId": "1bcbac4b-3517-43ee-98d0-874b103ecf30", + "error": "", + "response": true, + "successStatus": 0 +} +---- ++ +[{response_table_props}] +|======= +|Field +|Type +|Description +|Example + +|`response` +| boolean +| `true` if value was stored in cache, `false` otherwise. 
+| true +|======= + + +=== Put + +Stores a given key-value pair in a cache. + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=put&key=newKey&val=newValue&cacheName={cacheName}&destId={nodeId} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + + +|`cacheName` +| string +| Yes +| Cache name. If not provided, the default cache is used. +| partitionedCache + +|`key` +| string +| +| Key to be associated with values. +| name + +|`val` +| string +| +| Value to be associated with keys. +| Jack + +|`destId` +| string +| Yes +| Node ID for which the metrics are to be returned. +| 8daab5ea-af83-4d91-99b6-77ed2ca06647 + +|`exp` +| long +| Yes +|Expiration time in milliseconds for the entry. When the parameter is set, the operation is executed with link:developers-guide/configuring-caches/expiry-policies[ModifiedExpiryPolicy]. +| 60000 + +|======= + +*Response:*:: ++ +[source,json] +---- +{ + "affinityNodeId": "1bcbac4b-3517-43ee-98d0-874b103ecf30", + "error": "", + "response": true, + "successStatus": 0 +} +---- ++ +[{response_table_props}] +|======= +|Field +|Type +|Description +|Example + +|`response` +| boolean +| `true` if value was stored in cache, `false` otherwise. +|true +|======= + + +=== Put all +Stores the given key-value pairs in cache. + + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=putall&k1={putKey1}&k2={putKey2}&k3={putKey3}&v1={value1}&v2={value2}&v3={value3}&cacheName={cacheName}&destId={nodeId} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + + +|`cacheName` +| string +| Yes +| Cache name. If not provided, the default cache is used. +| partitionedCache + +|`k1...kN` +| string +| +| Keys to be associated with values. +| name + +|`v1...vN` +| string +| +| Values to be associated with keys. +| Jack + +|`destId` +| string +| Yes +| Node ID for which the metrics are to be returned. 
+| 8daab5ea-af83-4d91-99b6-77ed2ca06647 +|======= + +*Response:*:: ++ +[source,json] +---- +{ + "affinityNodeId": "1bcbac4b-3517-43ee-98d0-874b103ecf30", + "error": "", + "response": true, + "successStatus": 0 +} +---- ++ +[{response_table_props}] +|======= +|Field +|Type +|Description +|Example + +|`response` +|boolean +|`true` if the values were stored in cache, `false` otherwise. +|true +|======= + + +=== Put If Absent + +Stores a given key-value pair in a cache if the cache does not contain the given key. + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=putifabs&key={getKey}&val={newVal}&cacheName={cacheName} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + + +|`cacheName` +| string +| Yes +| Cache name. If not provided, the default cache is used. +| partitionedCache + +|`key` +| string +| +| Key to be associated with value. +| name + +|`val` +| string +| +| Value to be associated with key. +| Jack + +|`destId` +| string +| Yes +| Node ID for which the metrics are to be returned. +| 8daab5ea-af83-4d91-99b6-77ed2ca06647 + +|`exp` +| long +| Yes +| Expiration time in milliseconds for the entry. When the parameter is set, the operation is executed with link:developers-guide/configuring-caches/expiry-policies[ModifiedExpiryPolicy]. +| 60000 + +|======= + +*Response:*:: ++ +The response field contains `true` if the entry was put, `false` otherwise. ++ +[source,json] +---- +{ + "affinityNodeId": "2bd7b049-3fa0-4c44-9a6d-b5c7a597ce37", + "error": "", + "response": true, + "successStatus": 0 +} +---- + + +=== Contains Key + +Determines if cache contains an entry for the specified key. + + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=conkey&key={getKey}&cacheName={cacheName} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + +|`cacheName` +| string +| Yes +| Cache name. If not provided, the default cache is used. 
+| partitionedCache + +|`key` +| string +| +| Key whose presence in this cache is to be tested. +| testKey + +|`destId` +| string +| Yes +| Node ID for which the metrics are to be returned. +| 8daab5ea-af83-4d91-99b6-77ed2ca06647 +|======= + +*Response:*:: ++ +[source,json] +---- +{ + "affinityNodeId": "2bd7b049-3fa0-4c44-9a6d-b5c7a597ce37", + "error": "", + "response": true, + "successStatus": 0 +} +---- ++ +[{response_table_props}] +|======= +|Field +|Type +|Description +|Example + +|`response` +| boolean +| `true` if this map contains a mapping for the specified key. +| true +|======= + +=== Contains keys + +Determines if cache contains any entries for the specified keys. + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=conkeys&k1={getKey1}&k2={getKey2}&cacheName={cacheName} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + + +|`cacheName` +| string +| Yes +| Cache name. If not provided, the default cache is used. +| partitionedCache + +|`k1...kN` +| string +| +| Key whose presence in this cache is to be tested. +| key1, key2, ..., keyN + +|`destId` +| string +| Yes +| Node ID for which the metrics are to be returned. +| `8daab5ea-af83-4d91-99b6-77ed2ca06647` +|======= + +*Response:*:: ++ +[source,json] +---- +{ + "affinityNodeId": "2bd7b049-3fa0-4c44-9a6d-b5c7a597ce37", + "error": "", + "response": true, + "successStatus": 0 +} +---- ++ +[{response_table_props}] +|======= +|Field +|Type +|Description +|Example + +|`response` +| boolean +| `true` if this cache contains a mapping for the specified keys. +| true +|======= + + +=== Get or Create Cache +Creates a cache with the given name if it does not exist. + + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=getorcreate&cacheName={cacheName} +---- ++ +[width="100%", cols="15%,15%,15%,55%", opts="header"] +|======= +|Parameter +|Type +|Optional +|Description + +|`cacheName` +| String +| Yes +| Cache name. 
If not provided, the default cache is used. + +|`backups` +| int +| Yes +| Number of backups for cache data. Default is 0. + +|`dataRegion` +| String +| Yes +| Name of the data region the cache should belong to. + +|`templateName` +| String +| Yes +| Name of the cache template registered in Ignite to use as a configuration for the distributed cache. See the link:developers-guide/configuring-caches/configuration-overview#cache-templates[Cache Template, window=_blank] section for more information. + +|`cacheGroup` +| String +| Yes +| Name of the group the cache should belong to. + +|`writeSynchronizationMode` +| String +| Yes +a|Sets the write synchronization mode for the given cache: + +- `FULL_SYNC` +- `FULL_ASYNC` +- `PRIMARY_SYNC` +|======= + +*Response:*:: ++ +[source,json] +---- +{ + "error": "", + "response": null, + "successStatus": 0 +} +---- + + +=== Destroy cache +Destroys cache with given name. + + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=destcache&cacheName={cacheName} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + + +|`cacheName` +| string +| Yes +| Cache name. If not provided, the default cache is used. +| partitionedCache +|======= + +*Response:*:: ++ +[source,json] +---- +{ + "error": "", + "response": null, + "successStatus": 0 +} +---- + +=== Node + +Gets information about a node. + + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=node&attr={includeAttributes}&mtr={includeMetrics}&id={nodeId}&caches={includeCaches} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + + +|`mtr` +| boolean +| Yes +| Response includes metrics, if this parameter is `true`. +| true + +|`attr` +| boolean +| Yes +| Response includes attributes, if this parameter is `true`. +| true + +|`ip` +| string +| +| This parameter is optional, if id parameter is passed. Response is returned for node which has the IP. 
+| 192.168.0.1 + +|`id` +| string +| +| This parameter is optional if the `ip` parameter is passed. Response is returned for the node that has the given node ID. +| 8daab5ea-af83-4d91-99b6-77ed2ca06647 + +|`caches` +|boolean +|Yes +| When set to `true` the cache information returned by node includes: name, mode, and SQL Schema. + + When set to `false` the node command does not return any cache information. + + Default value is `true`. +|true +|======= + +*Response:*:: ++ +[source,json] +---- +{ + "error": "", + "response": { + "attributes": null, + "caches": {}, + "consistentId": "127.0.0.1:47500", + "defaultCacheMode": "REPLICATED", + "metrics": null, + "nodeId": "2d0d6510-6fed-4fa3-b813-20f83ac4a1a9", + "replicaCount": 128, + "tcpAddresses": ["127.0.0.1"], + "tcpHostNames": [""], + "tcpPort": 11211 + }, + "successStatus": 0 +} +---- + +=== Log +Shows server logs. + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=log&from={from}&to={to}&path={pathToLogFile} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + +|`from` +|integer +|Yes +|Number of the line to start from. This parameter is mandatory if `to` is passed. +|`0` + +|`path` +|string +|Yes +|The path to the log file. If not provided, a default one is used. +|`/log/cache_server.log` + +|`to` +|integer +|Yes +|Number of the line to finish on. This parameter is mandatory if `from` is passed. +|`1000` +|======= + +*Response:*:: ++ +[source,json] +---- +{ + "error": "", + "response": ["[14:01:56,626][INFO ][test-runner][GridDiscoveryManager] Topology snapshot [ver=1, nodes=1, CPUs=8, heap=1.8GB]"], + "successStatus": 0 +} +---- + + +=== Topology +Gets the information about cluster topology. 
+ +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=top&attr=true&mtr=true&id=c981d2a1-878b-4c67-96f6-70f93a4cd241 +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + + +|`mtr` +| boolean +| Yes +| Response will include metrics, if this parameter is `true`. +| true + +|`attr` +| boolean +|Yes +| Response will include attributes, if this parameter is `true`. +| true + +|`ip` +| string +| Yes +| This parameter is optional, if the `id` parameter is passed. Response will be returned for node which has the IP. +| 192.168.0.1 + +|`id` +| string +| Yes +| This parameter is optional, if the `ip` parameter is passed. Response will be returned for node which has the node ID. +| 8daab5ea-af83-4d91-99b6-77ed2ca06647 + +|`caches` +| boolean +| Yes +| When set to `true` the cache information returned by top will include: `name`, `mode`, and `SQL Schema`. + When set to `false` the top command does not return any cache information. + Default value is `true`. + true +|======= + +*Response:*:: ++ +[source,json] +---- +{ + "error": "", + "response": [ + { + "attributes": { + ... + }, + "caches": [ + { + name: "", + mode: "PARTITIONED" + }, + { + name: "partitionedCache", + mode: "PARTITIONED", + sqlSchema: "partitionedCache" + } + ], + "consistentId": "127.0.0.1:47500", + "metrics": { + ... + }, + "nodeId": "96baebd6-dedc-4a68-84fd-f804ee1ed995", + "replicaCount": 128, + "tcpAddresses": ["127.0.0.1"], + "tcpHostNames": [""], + "tcpPort": 11211 + }, + { + "attributes": { + ... + }, + "caches": [ + { + name: "", + mode: "REPLICATED" + } + ], + "consistentId": "127.0.0.1:47501", + "metrics": { + ... + }, + "nodeId": "2bd7b049-3fa0-4c44-9a6d-b5c7a597ce37", + "replicaCount": 128, + "tcpAddresses": ["127.0.0.1"], + "tcpHostNames": [""], + "tcpPort": 11212 + } + ], + "successStatus": 0 +} +---- + +=== Execute a Task +Executes a given task in the cluster. 
+ +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=exe&name=taskName&p1=param1&p2=param2&async=true +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + +|`name` +| string +| +| Name of the task to execute. +| `summ` + +|`p1...pN` +| string +| Yes +| Argument of task execution. +| arg1...argN + +|`async` +| boolean +| Yes +| Determines whether the task is performed asynchronously. +| `true` +|======= + +*Response:*:: ++ +The response contains an error message, unique identifier of the task, the status and result of computation. ++ +[source,json] +---- +{ + "error": "", + "response": { + "error": "", + "finished": true, + "id": "~ee2d1688-2605-4613-8a57-6615a8cbcd1b", + "result": 4 + }, + "successStatus": 0 +} +---- + +=== Result of a Task + +Returns the computation result for a given task. + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=res&id={taskId} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + +|`id` +| string +| +| ID of the task whose result is to be returned. +| 69ad0c48941-4689aae0-6b0e-4d52-8758-ce8fe26f497d{tilde}4689aae0-6b0e-4d52-8758-ce8fe26f497d +|======= + +*Response:*:: ++ +-- +The response contains information about errors (if any), ID of the task, and the status and result of computation. + +[source,json] +---- +{ + "error": "", + "response": { + "error": "", + "finished": true, + "id": "69ad0c48941-4689aae0-6b0e-4d52-8758-ce8fe26f497d~4689aae0-6b0e-4d52-8758-ce8fe26f497d", + "result": 4 + }, + "successStatus": 0 +} +---- +-- + +=== SQL Query Execute + +Runs SQL query over cache. 
+ +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=qryexe&type={type}&pageSize={pageSize}&cacheName={cacheName}&arg1=1000&arg2=2000&qry={query} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + + +|`type` +| string +| +|Type for the query +|String + +|`pageSize` +| number +| +| Page size for the query. +| 3 + +|`cacheName` +| string +| Yes +| Cache name. If not provided, the default cache is used. +| testCache + +|`arg1...argN` +| string +| +| Query arguments +|1000,2000 + +|`qry` +| strings +| +| Encoding sql query +| `salary+%3E+%3F+and+salary+%3C%3D+%3F` +|======= + +*Response:*:: ++ +The response object contains the items returned by the query, a flag indicating the last page, and `queryId`. ++ +[source,json] +---- +{ + "error":"", + "response":{ + "fieldsMetadata":[], + "items":[ + {"key":3,"value":{"name":"Jane","id":3,"salary":2000}}, + {"key":0,"value":{"name":"John","id":0,"salary":2000}}], + "last":true, + "queryId":0}, + "successStatus":0 +} +---- + +=== SQL Fields Query Execute +Runs SQL fields query over cache. + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=qryfldexe&pageSize=10&cacheName={cacheName}&qry={qry} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + + +|`pageSize` +| number +| +| Page size for the query. +| 3 + +|`cacheName` +| string +| Yes +| Cache name. If not provided, the default cache is used. +| testCache + +|`arg1...argN` +| string +| +| Query arguments. +|1000,2000 + +|`qry` +| strings +| +| Encoding sql fields query. +|`select+firstName%2C+lastName+from+Person` +|======= + +*Response:*:: ++ +The response object contains the items returned by the query, fields query metadata, a flag indicating the last page, and `queryId`. 
++ +[source,json] +---- +{ + "error": "", + "response": { + "fieldsMetadata": [ + { + "fieldName": "FIRSTNAME", + "fieldTypeName": "java.lang.String", + "schemaName": "person", + "typeName": "PERSON" + }, + { + "fieldName": "LASTNAME", + "fieldTypeName": "java.lang.String", + "schemaName": "person", + "typeName": "PERSON" + } + ], + "items": [["Jane", "Doe" ], ["John", "Doe"]], + "last": true, + "queryId": 0 + }, + "successStatus": 0 +} +---- + + +=== SQL Scan Query Execute + +Runs a scan query over a cache. + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=qryscanexe&pageSize={pageSize}&cacheName={cacheName}&className={className} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + +|`pageSize` +| Number +| +| Page size for the query +| 3 + +|`cacheName` +| String +| Yes +| Cache name. If not provided, the default cache is used. +| testCache + +|`className` +| String +| Yes +| Predicate class name for scan query. Class should implement `IgniteBiPredicate` interface. +| `org.apache.ignite.filters.PersonPredicate` +|======= + +*Response:*:: ++ +The response object contains the items returned by the scan query, fields query metadata, a flag indicating last page, and `queryId`. ++ +[source,json] +---- +{ + "error": "", + "response": { + "fieldsMetadata": [ + { + "fieldName": "key", + "fieldTypeName": "", + "schemaName": "", + "typeName": "" + }, + { + "fieldName": "value", + "fieldTypeName": "", + "schemaName": "", + "typeName": "" + } + ], + "items": [ + { + "key": 1, + "value": { + "firstName": "Jane", + "id": 1, + "lastName": "Doe", + "salary": 1000 + } + }, + { + "key": 3, + "value": { + "firstName": "Jane", + "id": 3, + "lastName": "Smith", + "salary": 2000 + } + } + ], + "last": true, + "queryId": 0 + }, + "successStatus": 0 +} +---- + + +=== SQL Query Fetch +Gets next page for the query. 
+ +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=qryfetch&pageSize={pageSize}&qryId={queryId} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + +|`pageSize` +| number +| +| Page size for the query. +| 3 + +|`qryId` +| number +| +| Query id that is returned from the `Sql query execute`, `sql fields query execute`, or `sql fetch` commands. +| 0 +|======= + +*Response:*:: ++ +The response object contains the items returned by the query, a flag indicating the last page, and `queryId`. ++ +[source,json] +---- +{ + "error":"", + "response":{ + "fieldsMetadata":[], + "items":[["Jane","Doe"],["John","Doe"]], + "last":true, + "queryId":0 + }, + "successStatus":0 +} +---- + + +=== SQL Query Close + +Closes query resources. + +*Request:*:: ++ +[source,shell] +---- +http://host:port/ignite?cmd=qrycls&qryId={queryId} +---- ++ +[{request_table_props}] +|======= +|Parameter +|Type +|Optional +|Description +|Example + + +|`qryId` +|number +| +|Query id that is returned from the `SQL query execute`, `SQL fields query execute`, or `SQL fetch` commands. +|0 +|======= + +*Response:*:: ++ +The command returns 'true' if the query was closed successfully. ++ +[source,json] +---- +{ + "error":"", + "response":true, + "successStatus":0 +} +---- + + diff --git a/docs/_docs/developers-guide/transactions/mvcc.adoc b/docs/_docs/developers-guide/transactions/mvcc.adoc new file mode 100644 index 0000000..b6354a8 --- /dev/null +++ b/docs/_docs/developers-guide/transactions/mvcc.adoc @@ -0,0 +1,179 @@ += Multiversion Concurrency Control + +IMPORTANT: MVCC is currently in beta. + +== Overview + +Caches with the `TRANSACTIONAL_SNAPSHOT` atomicity mode support SQL transactions as well as link:developers-guide/key-value-api/transactions[key-value transactions] and enable multiversion concurrency control (MVCC) for both types of transactions. 
+ + +== Multiversion Concurrency Control + + +Multiversion Concurrency Control (MVCC) is a method of controlling the consistency of data accessed by multiple users concurrently. MVCC implements the https://en.wikipedia.org/wiki/Snapshot_isolation[snapshot isolation] guarantee which ensures that each transaction always sees a consistent snapshot of data. + +Each transaction obtains a consistent snapshot of data when it starts and can only view and modify data in this snapshot. +When the transaction updates an entry, Ignite verifies that the entry has not been updated by other transactions and creates a new version of the entry. +The new version becomes visible to other transactions only when and if this transaction commits successfully. +If the entry has been updated, the current transaction fails with an exception (see the <<Concurrent Updates>> section for the information on how to handle update conflicts). + +//// +*TODO* Artem - we should explain what a physical vs logical snapshot is. I don't know. +//// + +The snapshots are not physical snapshots but logical snapshots that are generated by the MVCC-coordinator: a cluster node that coordinates transactional activity in the cluster. The coordinator keeps track of all active transactions and is notified when each transaction finishes. All operations with an MVCC-enabled cache request a snapshot of data from the coordinator. + +== Enabling MVCC +To enable MVCC for a cache, use the `TRANSACTIONAL_SNAPSHOT` atomicity mode in the cache configuration. 
If you create a table with the `CREATE TABLE` command, specify the atomicity mode as a parameter in the `WITH` part of the command: + + +[tabs] +-- +tab:XML[] + +[source, xml] +---- +include::code-snippets/xml/mvcc.xml[tags=ignite-config;!discovery, indent=0] +---- + +tab:SQL[] +[source,sql] +---- +CREATE TABLE Person (id int, name varchar, PRIMARY KEY (id)) WITH "ATOMICITY=TRANSACTIONAL_SNAPSHOT" +---- +-- + +NOTE: The `TRANSACTIONAL_SNAPSHOT` mode only supports the default concurrency mode (`PESSIMISTIC`) and default isolation level (`REPEATABLE_READ`). See link:developers-guide/key-value-api/transactions#concurrency-modes-and-isolation-levels[Concurrency modes and isolation levels] for details. + + +== Concurrent Updates + +If an entry is read and then updated within a single transaction, it is possible that another transaction could be processed in between the two operations and update the entry first. In this case, an exception is thrown when the first transaction attempts to update the entry and the transaction is marked as "rollback only". You have to retry the transaction. + +This is how to tell that an update conflict has occurred: + +* When the Java transaction API is used, a `CacheException` is thrown with the message `Cannot serialize transaction due to write conflict (transaction is marked for rollback)` and the `Transaction.rollbackOnly` flag is set to `true`. +* When SQL transactions are executed through the JDBC or ODBC driver, the `SQLSTATE:40001` error code is returned. + +[tabs] +-- + +tab:Ignite Java[] +[source,java] +---- +for(int i = 1; i <=5 ; i++) { + try (Transaction tx = Ignition.ignite().transactions().txStart()) { + System.out.println("attempt #" + i + ", value: " + cache.get(1)); + try { + cache.put(1, "new value"); + tx.commit(); + System.out.println("attempt #" + i + " succeeded"); + break; + } catch (CacheException e) { + if (!tx.isRollbackOnly()) { + // Transaction was not marked as "rollback only", + // so it's not a concurrent update issue. + // Process the exception here.
+ break; + } + } + } +} +---- +tab:JDBC[] +[source,java] +---- +Class.forName("org.apache.ignite.IgniteJdbcThinDriver"); + +// Open JDBC connection. +Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1"); + +PreparedStatement updateStmt = null; +PreparedStatement selectStmt = null; + +try { + // starting a transaction + conn.setAutoCommit(false); + + selectStmt = conn.prepareStatement("select name from Person where id = ?"); + selectStmt.setInt(1, 1); + ResultSet rs = selectStmt.executeQuery(); + + if (rs.next()) + System.out.println("name = " + rs.getString("name")); + + updateStmt = conn.prepareStatement("update Person set name = ? where id = ? "); + + updateStmt.setString(1, "New Name"); + updateStmt.setInt(2, 1); + updateStmt.executeUpdate(); + + // committing the transaction + conn.commit(); +} catch (SQLException e) { + if ("40001".equals(e.getSQLState())) { + // retry the transaction + } else { + // process the exception + } +} finally { + if (updateStmt != null) updateStmt.close(); + if (selectStmt != null) selectStmt.close(); +} +---- + +tab:C#/.NET[] +[source,csharp] +---- +include::code-snippets/dotnet/SqlTransactions.cs[tag=mvccConcurrentUpdates,indent=0] +---- + + +tab:C++[] +[source,cpp] +---- +include::code-snippets/cpp/src/concurrent_updates.cpp[tag=concurrent-updates,indent=0] +---- + +-- + + + +== Limitations + +=== Cross-Cache Transactions +The `TRANSACTIONAL_SNAPSHOT` mode is enabled per cache and does not permit caches with different atomicity modes within the same transaction. As a consequence, if you want to cover multiple tables in one SQL transaction, all tables must be created with the `TRANSACTIONAL_SNAPSHOT` mode. + +=== Nested Transactions +Ignite supports three modes of handling nested SQL transactions. The desired mode can be specified via a JDBC/ODBC connection parameter.
+ + +[source, shell] +---- +jdbc:ignite:thin://127.0.0.1/?nestedTransactionsMode=COMMIT +---- + +When a nested transaction occurs within another transaction, the `nestedTransactionsMode` parameter dictates the system behavior: + +- `ERROR` — When the nested transaction is encountered, an error is thrown and the enclosing transaction is rolled back. This is the default behavior. +- `COMMIT` — The enclosing transaction is committed; the nested transaction starts and is committed when its COMMIT statement is encountered. The rest of the statements in the enclosing transaction are executed as implicit transactions. +- `IGNORE` — DO NOT USE THIS MODE. The beginning of the nested transaction is ignored, statements within the nested transaction will be executed as part of the enclosing transaction, and all changes will be committed with the commit of the nested transaction. The subsequent statements of the enclosing transaction will be executed as implicit transactions. + + +=== Continuous Queries +If you use link:developers-guide/key-value-api/continuous-queries[Continuous Queries] with an MVCC-enabled cache, there are several limitations that you should be aware of: + +* When an update event is received, subsequent reads of the updated key may return the old value for a period of time before the MVCC-coordinator learns of the update. This is because the update event is sent from the node where the key is updated, as soon as it is updated. In such a case, the MVCC-coordinator may not be immediately aware of that update, and therefore, subsequent reads may return outdated information during that period of time. +* There is a limit on the number of keys per node a single transaction can update when continuous queries are used. The updated values are kept in memory, and if there are too many updates, the node might not have enough RAM to keep all the objects. 
To avoid OutOfMemory errors, each transaction is allowed to update at most 20,000 keys (the default value) on a single node. If this value is exceeded, the transaction will throw an exception and will be rolled back. This number can be changed via the `IGNITE_MVCC_TX_SIZE_CACHING_THRESHOLD` system property. + +=== Other Limitations +The following features are not supported for the MVCC-enabled caches. These limitations may be addressed in future releases. + +* link:developers-guide/near-cache[Near Caches] +* link:developers-guide/configuring-caches/expiry-policies[Expiry Policies] +* link:developers-guide/events/listening-to-events[Events] +* link:{javadoc_base_url}/org/apache/ignite/cache/CacheInterceptor.html[Cache Interceptors] +* link:developers-guide/persistence/external-storage[External Storage] +* link:developers-guide/configuring-caches/on-heap-caching[On-Heap Caching] +* link:{javadoc_base_url}/org/apache/ignite/IgniteCache.html#lock-K-[Explicit Locks] +* The link:{javadoc_base_url}/org/apache/ignite/IgniteCache.html#localEvict-java.util.Collection-[localEvict()] and link:{javadoc_base_url}/org/apache/ignite/IgniteCache.html#localPeek-K-org.apache.ignite.cache.CachePeekMode...-[localPeek()] methods