Modified: cassandra/site/publish/doc/cql3/CQL-2.2.html URL: http://svn.apache.org/viewvc/cassandra/site/publish/doc/cql3/CQL-2.2.html?rev=1735378&r1=1735377&r2=1735378&view=diff ============================================================================== --- cassandra/site/publish/doc/cql3/CQL-2.2.html (original) +++ cassandra/site/publish/doc/cql3/CQL-2.2.html Thu Mar 17 09:27:25 2016 @@ -1,6 +1,6 @@ <?xml version='1.0' encoding='utf-8' ?><!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"><html xmlns="http://www.w3.org/1999/xhtml"><head><meta http-equiv="Content-Type" content="text/html; charset=utf-8"/><title>CQL</title></head><body><p><link rel="StyleSheet" href="CQL.css" type="text/css" media="screen"></p><h1 id="CassandraQueryLanguageCQLv3.3.1">Cassandra Query Language (CQL) v3.3.1</h1><span id="tableOfContents"><ol style="list-style: none;"><li><a href="CQL.html#CassandraQueryLanguageCQLv3.3.1">Cassandra Query Language (CQL) v3.3.1</a><ol style="list-style: none;"><li><a href="CQL.html#CQLSyntax">CQL Syntax</a><ol style="list-style: none;"><li><a href="CQL.html#Preamble">Preamble</a></li><li><a href="CQL.html#Conventions">Conventions</a></li><li><a href="CQL.html#identifiers">Identifiers and keywords</a></li><li><a href="CQL.html#constants">Constants</a></li><li><a href="CQL.html#Comments">Comments</a></li><li><a href="CQL.html#statements">Statements</a></li><li><a href="CQL.html#preparedStatement">Prepared Statement</a></li></ol></li><li><a href="CQL.html#dataDefinition">Data Definition</a><ol style="list-style: none;"><li><a href="CQL.html#createKeyspaceStmt">CREATE KEYSPACE</a></li><li><a href="CQL.html#useStmt">USE</a></li><li><a href="CQL.html#alterKeyspaceStmt">ALTER KEYSPACE</a></li><li><a href="CQL.html#dropKeyspaceStmt">DROP KEYSPACE</a></li><li><a href="CQL.html#createTableStmt">CREATE TABLE</a></li><li><a href="CQL.html#alterTableStmt">ALTER TABLE</a></li><li><a href="CQL.html#dropTableStmt">DROP TABLE</a></li><li><a href="CQL.html#truncateStmt">TRUNCATE</a></li><li><a href="CQL.html#createIndexStmt">CREATE INDEX</a></li><li><a href="CQL.html#dropIndexStmt">DROP INDEX</a></li><li><a href="CQL.html#createTypeStmt">CREATE TYPE</a></li><li><a href="CQL.html#alterTypeStmt">ALTER TYPE</a></li><li><a href="CQL.html#dropTypeStmt">DROP TYPE</a></li><li><a href="CQL.html#createTriggerStmt">CREATE TRIGGER</a></li><li><a href="CQL.html#dropTriggerStmt">DROP TRIGGER</a></li><li><a href="CQL.html#createFunctionStmt">CREATE FUNCTION</a></li><li><a href="CQL.html#dropFunctionStmt">DROP FUNCTION</a></li><li><a href="CQL.html#createAggregateStmt">CREATE AGGREGATE</a></li><li><a href="CQL.html#dropAggregateStmt">DROP AGGREGATE</a></li></ol></li><li><a href="CQL.html#dataManipulation">Data Manipulation</a><ol style="list-style: none;"><li><a href="CQL.html#insertStmt">INSERT</a></li><li><a href="CQL.html#updateStmt">UPDATE</a></li><li><a href="CQL.html#deleteStmt">DELETE</a></li><li><a href="CQL.html#batchStmt">BATCH</a></li></ol></li><li><a href="CQL.html#queries">Queries</a><ol style="list-style: none;"><li><a href="CQL.html#selectStmt">SELECT</a></li></ol></li><li><a href="CQL.html#databaseRoles">Database Roles</a><ol style="list-style: none;"><li><a href="CQL.html#createRoleStmt">CREATE ROLE</a></li><li><a href="CQL.html#alterRoleStmt">ALTER ROLE</a></li><li><a href="CQL.html#dropRoleStmt">DROP ROLE</a></li><li><a href="CQL.html#grantRoleStmt">GRANT ROLE</a></li><li><a href="CQL.html#revokeRoleStmt">REVOKE
ROLE</a></li><li><a href="CQL.html#createUserStmt">CREATE USER </a></li><li><a href="CQL.html#alterUserStmt">ALTER USER </a></li><li><a href="CQL.html#dropUserStmt">DROP USER </a></li><li><a href="CQL.html#listUsersStmt">LIST USERS</a></li></ol></li><li><a href="CQL.html#dataControl">Data Control</a><ol style="list-style: none;"><li><a href="CQL.html#permissions">Permissions </a></li><li><a href="CQL.html#grantPermissionsStmt">GRANT PERMISSION</a></li><li><a href="CQL.html#revokePermissionsStmt">REVOKE PERMISSION</a></li></ol></li><li><a href="CQL.html#types">Data Types</a><ol style="list-style: none;"><li><a href="CQL.html#usingtimestamps">Working with timestamps</a></li><li><a href="CQL.html#usingdates">Working with dates</a></li><li><a href="CQL.html#usingtime">Working with time</a></li><li><a href="CQL.html#counters">Counters</a></li><li><a href="CQL.html#collections">Working with collections</a></li></ol></li><li><a href="CQL.html#functions">Functions</a><ol style="list-style: none;"><li><a href="CQL.html#tokenFun">Token</a></li><li><a href="CQL.html#uuidFun">Uuid</a></li><li><a href="CQL.html#timeuuidFun">Timeuuid functions</a></li><li><a href="CQL.html#timeFun">Time conversion functions</a></li><li><a href="CQL.html#blobFun">Blob conversion functions</a></li></ol></li><li><a href="CQL.html#aggregates">Aggregates</a><ol style="list-style: none;"><li><a href="CQL.html#countFct">Count</a></li><li><a href="CQL.html#maxMinFcts">Max and Min</a></li><li><a href="CQL.html#sumFct">Sum</a></li><li><a href="CQL.html#avgFct">Avg</a></li></ol></li><li><a href="CQL.html#udfs">User-Defined Functions</a></li><li><a href="CQL.html#udas">User-Defined Aggregates</a></li><li><a href="CQL.html#json">JSON Support</a><ol style="list-style: none;"><li><a href="CQL.html#selectJson">SELECT JSON</a></li><li><a href="CQL.html#insertJson">INSERT JSON</a></li><li><a href="CQL.html#jsonEncoding">JSON Encoding of Cassandra Data Types</a></li><li><a href="CQL.html#fromJson">The fromJson() Function</a></li><li><a href="CQL.html#toJson">The toJson() Function</a></li></ol></li><li><a href="CQL.html#appendixA">Appendix A: CQL Keywords</a></li><li><a href="CQL.html#appendixB">Appendix B: CQL Reserved Types</a></li><li><a href="CQL.html#changes">Changes</a><ol style="list-style: none;"><li><a href="CQL.html#a3.3.1">3.3.1</a></li><li><a href="CQL.html#a3.3.0">3.3.0</a></li><li><a href="CQL.html#a3.2.0">3.2.0</a></li><li><a href="CQL.html#a3.1.7">3.1.7</a></li><li><a href="CQL.html#a3.1.6">3.1.6</a></li><li><a href="CQL.html#a3.1.5">3.1.5</a></li><li><a href="CQL.html#a3.1.4">3.1.4</a></li><li><a href="CQL.html#a3.1.3">3.1.3</a></li><li><a href="CQL.html#a3.1.2">3.1.2</a></li><li><a href="CQL.html#a3.1.1">3.1.1</a></li><li><a href="CQL.html#a3.1.0">3.1.0</a></li><li><a href="CQL.html#a3.0.5">3.0.5</a></li><li><a href="CQL.html#a3.0.4">3.0.4</a></li><li><a href="CQL.html#a3.0.3">3.0.3</a></li><li><a href="CQL.html#a3.0.2">3.0.2</a></li><li><a href="CQL.html#a3.0.1">3.0.1</a></li></ol></li><li><a href="CQL.html#Versioning">Versioning</a></li></ol></li></ol></span><h2 id="CQLSyntax">CQL Syntax</h2><h3 id="Preamble">Preamble</h3><p>This document describes the Cassandra Query Language (CQL) version 3. CQL v3 is not backward compatible with CQL v2 and differs from it in numerous ways. Note that this document describes the latest version of the language.
However, the <a href="#changes">changes</a> section provides the diff between the different versions of CQL v3.</p><p>CQL v3 offers a model very close to SQL in the sense that data is put in <em>tables</em> containing <em>rows</em> of <em>columns</em>. For that reason, when used in this document, these terms (tables, rows and columns) have the same definition as they have in SQL. But please note that as such, they do <strong>not</strong> refer to the concept of rows and columns found in the internal implementation of Cassandra and in the thrift and CQL v2 API.</p><h3 id="Conventions">Conventions</h3><p>To aid in specifying the CQL syntax, we will use the following conventions in this document:</p><ul><li>Language rules will be given in a <a href="http://en.wikipedia.org/wiki/Backus%E2%80%93Naur_Form">BNF</a>-like notation:</li></ul><pre class="syntax"><pre><start> ::= TERMINAL <non-terminal1> <non-terminal1> </pre></pre><ul><li>Nonterminal symbols will have <code><angle brackets></code>.</li><li>As additional shortcut notations to BNF, we’ll use traditional regular expression symbols (<code>?</code>, <code>+</code> and <code>*</code>) to signify that a given symbol is optional and/or can be repeated. We’ll also allow parentheses to group symbols and the <code>[<characters>]</code> notation to represent any one of <code><characters></code>.</li><li>The grammar is provided for documentation purposes and leaves some minor details out. For instance, the last column definition in a <code>CREATE TABLE</code> statement is optional but supported if present, even though the grammar provided in this document suggests it is not supported.</li><li>Sample code will be provided in a code block:</li></ul><pre class="sample"><pre>SELECT sample_usage FROM cql; -</pre></pre><ul><li>References to keywords or pieces of CQL code in running text will be shown in a <code>fixed-width font</code>.</li></ul><h3 id="identifiers">Identifiers and keywords</h3><p>The CQL language uses <em>identifiers</em> (or <em>names</em>) to identify tables, columns and other objects. An identifier is a token matching the regular expression <code>[a-zA-Z]</code><code>[a-zA-Z0-9_]</code><code>*</code>.</p><p>A number of such identifiers, like <code>SELECT</code> or <code>WITH</code>, are <em>keywords</em>. They have a fixed meaning for the language and most are reserved. The list of those keywords can be found in <a href="#appendixA">Appendix A</a>.</p><p>Identifiers and (unquoted) keywords are case insensitive. Thus <code>SELECT</code> is the same as <code>select</code> or <code>sElEcT</code>, and <code>myId</code> is the same as <code>myid</code> or <code>MYID</code> for instance. A convention often used (in particular by the samples of this documentation) is to use upper case for keywords and lower case for other identifiers.</p><p>There is a second kind of identifier, called <em>quoted identifiers</em>, defined by enclosing an arbitrary sequence of characters in double-quotes (<code>"</code>). Quoted identifiers are never keywords. Thus <code>"select"</code> is not a reserved keyword and can be used to refer to a column, while <code>select</code> would raise a parse error. Also, contrary to unquoted identifiers and keywords, quoted identifiers are case sensitive (<code>"My Quoted Id"</code> is <em>different</em> from <code>"my quoted id"</code>).
A fully lowercase quoted identifier that matches <code>[a-zA-Z]</code><code>[a-zA-Z0-9_]</code><code>*</code> is equivalent to the unquoted identifier obtained by removing the double-quote (so <code>"myid"</code> is equivalent to <code>myid</code> and to <code>myId</code> but different from <code>"myId"</code>). Inside a quoted identifier, the double-quote character can be repeated to escape it, so <code>"foo "" bar"</code> is a valid identifier.</p><h3 id="constants">Constants</h3><p>CQL defines the following kinds of <em>constants</em>: strings, integers, floats, booleans, uuids and blobs:</p><ul><li>A string constant is an arbitrary sequence of characters enclosed by single-quotes (<code>'</code>). One can include a single-quote in a string by repeating it, e.g. <code>'It''s raining today'</code>. Those are not to be confused with quoted identifiers that use double-quotes.</li><li>An integer constant is defined by <code>'-'?[0-9]+</code>.</li><li>A float constant is defined by <code>'-'?[0-9]+('.'[0-9]*)?([eE][+-]?[0-9+])?</code>. On top of that, <code>NaN</code> and <code>Infinity</code> are also float constants.</li><li>A boolean constant is either <code>true</code> or <code>false</code> up to case-insensitivity (i.e. <code>True</code> is a valid boolean constant).</li><li>A <a href="http://en.wikipedia.org/wiki/Universally_unique_identifier">UUID</a> constant is defined by <code>hex{8}-hex{4}-hex{4}-hex{4}-hex{12}</code> where <code>hex</code> is an hexadecimal character, e.g. <code>[0-9a-fA-F]</code> and <code>{4}</code> is the number of such characters.</li><li>A blob constant is an hexadecimal number defined by <code>0[xX](hex)+</code> where <code>hex</code> is an hexadecimal character, e.g. <code>[0-9a-fA-F]</code>.</li></ul><p>For how these constants are typed, see the <a href="#types">data types section</a>.</p><h3 id="Comments">Comments</h3><p>A comment in CQL is a line beginning with either double dashes (<code>--</code>) or a double slash (<code>//</code>).</p><p>Multi-line comments are also supported through enclosure within <code>/*</code> and <code>*/</code> (but nesting is not supported).</p><pre class="sample"><pre>-- This is a comment +</pre></pre><ul><li>References to keywords or pieces of CQL code in running text will be shown in a <code>fixed-width font</code>.</li></ul><h3 id="identifiers">Identifiers and keywords</h3><p>The CQL language uses <em>identifiers</em> (or <em>names</em>) to identify tables, columns and other objects. An identifier is a token matching the regular expression <code>[a-zA-Z]</code><code>[a-zA-Z0-9_]</code><code>*</code>.</p><p>A number of such identifiers, like <code>SELECT</code> or <code>WITH</code>, are <em>keywords</em>. They have a fixed meaning for the language and most are reserved. The list of those keywords can be found in <a href="#appendixA">Appendix A</a>.</p><p>Identifiers and (unquoted) keywords are case insensitive. Thus <code>SELECT</code> is the same as <code>select</code> or <code>sElEcT</code>, and <code>myId</code> is the same as <code>myid</code> or <code>MYID</code> for instance. A convention often used (in particular by the samples of this documentation) is to use upper case for keywords and lower case for other identifiers.</p><p>There is a second kind of identifier, called <em>quoted identifiers</em>, defined by enclosing an arbitrary sequence of characters in double-quotes (<code>"</code>). Quoted identifiers are never keywords.
Thus <code>"select"</code> is not a reserved keyword and can be used to refer to a column, while <code>select</code> would raise a parse error. Also, contrary to unquoted identifiers and keywords, quoted identifiers are case sensitive (<code>"My Quoted Id"</code> is <em>different</em> from <code>"my quoted id"</code>). A fully lowercase quoted identifier that matches <code>[a-zA-Z]</code><code>[a-zA-Z0-9_]</code><code>*</code> is equivalent to the unquoted identifier obtained by removing the double-quote (so <code>"myid"</code> is equivalent to <code>myid</code> and to <code>myId</code> but different from <code>"myId"</code>). Inside a quoted identifier, the double-quote character can be repeated to escape it, so <code>"foo "" bar"</code> is a valid identifier.</p><p><strong>Warning</strong>: <em>quoted identifiers</em> allow declaring columns with arbitrary names, and those can sometimes clash with specific names used by the server. For instance, when using conditional update, the server will respond with a result-set containing a special result named <code>"[applied]"</code>. If you’ve declared a column with such a name, this could potentially confuse some tools and should be avoided. In general, unquoted identifiers should be preferred but if you use quoted identifiers, it is strongly advised to avoid any name enclosed by square brackets (like <code>"[applied]"</code>) and any name that looks like a function call (like <code>"f(x)"</code>).</p><h3 id="constants">Constants</h3><p>CQL defines the following kinds of <em>constants</em>: strings, integers, floats, booleans, uuids and blobs:</p><ul><li>A string constant is an arbitrary sequence of characters enclosed by single-quotes (<code>'</code>). One can include a single-quote in a string by repeating it, e.g. <code>'It''s raining today'</code>. Those are not to be confused with quoted identifiers that use double-quotes.</li><li>An integer constant is defined by <code>'-'?[0-9]+</code>.</li><li>A float constant is defined by <code>'-'?[0-9]+('.'[0-9]*)?([eE][+-]?[0-9+])?</code>. On top of that, <code>NaN</code> and <code>Infinity</code> are also float constants.</li><li>A boolean constant is either <code>true</code> or <code>false</code> up to case-insensitivity (i.e. <code>True</code> is a valid boolean constant).</li><li>A <a href="http://en.wikipedia.org/wiki/Universally_unique_identifier">UUID</a> constant is defined by <code>hex{8}-hex{4}-hex{4}-hex{4}-hex{12}</code> where <code>hex</code> is an hexadecimal character, e.g. <code>[0-9a-fA-F]</code> and <code>{4}</code> is the number of such characters.</li><li>A blob constant is an hexadecimal number defined by <code>0[xX](hex)+</code> where <code>hex</code> is an hexadecimal character, e.g.
<code>[0-9a-fA-F]</code>.</li></ul><p>For how these constants are typed, see the <a href="#types">data types section</a>.</p><h3 id="Comments">Comments</h3><p>A comment in CQL is a line beginning with either double dashes (<code>--</code>) or a double slash (<code>//</code>).</p><p>Multi-line comments are also supported through enclosure within <code>/*</code> and <code>*/</code> (but nesting is not supported).</p><pre class="sample"><pre>-- This is a comment // This is a comment too /* This is a multi-line comment */ @@ -104,7 +104,7 @@ CREATE TABLE timeline ( INSERT INTO test(pk, t, v, s) VALUES (0, 0, 'val0', 'static0'); INSERT INTO test(pk, t, v, s) VALUES (0, 1, 'val1', 'static1'); SELECT * FROM test WHERE pk=0 AND t=0; -</pre></pre><p>the last query will return <code>'static1'</code> as value for <code>s</code>, since <code>s</code> is static and thus the 2nd insertion modified this “shared” value. Note however that static columns are only static within a given partition, and if in the example above both rows were from different partitions (i.e. if they had different values for <code>pk</code>), then the 2nd insertion would not have modified the value of <code>s</code> for the first row.</p><p>A few restrictions apply to when static columns are allowed:</p><ul><li>tables with the <code>COMPACT STORAGE</code> option (see below) cannot have them</li><li>a table without clustering columns cannot have static columns (in a table without clustering columns, every partition has only one row, and so every column is inherently static).</li><li>only non <code>PRIMARY KEY</code> columns can be static</li></ul><h4 id="createTableOptions"><code><option></code></h4><p>The <code>CREATE TABLE</code> statement supports a number of options that control the configuration of a new table. These options can be specified after the <code>WITH</code> keyword.</p><p>The first of these options is <code>COMPACT STORAGE</code>. This option is mainly targeted towards backward compatibility for definitions created before CQL3 (see <a href="http://www.datastax.com/dev/blog/thrift-to-cql3">www.datastax.com/dev/blog/thrift-to-cql3</a> for more details). The option also provides a slightly more compact layout of data on disk but at the price of diminished flexibility and extensibility for the table. Most notably, <code>COMPACT STORAGE</code> tables cannot have collections nor static columns, and a <code>COMPACT STORAGE</code> table with at least one clustering column supports exactly one (as in not 0 nor more than 1) column not part of the <code>PRIMARY KEY</code> definition (which implies in particular that you cannot add nor remove columns after creation). For those reasons, <code>COMPACT STORAGE</code> is not recommended outside of the backward compatibility reason mentioned above.</p><p>Another option is <code>CLUSTERING ORDER</code>. It allows defining the ordering of rows on disk. It takes the list of the clustering column names with, for each of them, the on-disk order (ascending or descending).
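<p>For instance, a table that keeps the most recent entries first within each partition could be declared as follows (a sketch only; the <code>events</code> table and its columns are illustrative and do not come from this document):</p><pre class="sample"><pre>CREATE TABLE events (
    sensor_id uuid,
    recorded_at timestamp,
    payload text,
    PRIMARY KEY (sensor_id, recorded_at)
) WITH CLUSTERING ORDER BY (recorded_at DESC);
</pre></pre>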
Note that this option affects <a href="#selectOrderBy">what <code>ORDER BY</code> are allowed during <code>SELECT</code></a>.</p><p>Table creation supports the following other <code><property></code>:</p><table><tr><th>option </th><th>kind </th><th>default </th><th>description</th></tr><tr><td><code>comment</code> </td><td><em>simple</em> </td><td>none </td><td>A free-form, human-readable comment.</td></tr><tr><td><code>read_repair_chance</code> </td><td><em>simple</em> </td><td>0.1 </td><td>The probability with which to query extra nodes (e.g. more nodes than required by the consistency level) for the purpos e of read repairs.</td></tr><tr><td><code>dclocal_read_repair_chance</code> </td><td><em>simple</em> </td><td>0 </td><td>The probability with which to query extra nodes (e.g. more nodes than required by the consistency level) belonging to the same data center than the read coordinator for the purpose of read repairs.</td></tr><tr><td><code>gc_grace_seconds</code> </td><td><em>simple</em> </td><td>864000 </td><td>Time to wait before garbage collecting tombstones (deletion markers).</td></tr><tr><td><code>bloom_filter_fp_chance</code> </td><td><em>simple</em> </td><td>0.00075 </td><td>The target probability of false positive of the sstable bloom filters. Said bloom filters will be sized to provide the provided probability (thus lowering this value impact the size of bloom filters in-memory and on-disk)</td></tr><tr><td><code>default_time_to_live</code> </td><td><em>simple</em> </td><td>0 </td><td>The default expiration time (“TTL& #8221;) in seconds for a table.</td></tr><tr><td><code>compaction</code> </td><td><em>map</em> </td><td><em>see below</em> </td><td>Compaction options, see <a href="#compactionOptions">below</a>.</td></tr><tr><td><code>compression</code> </td><td><em>map</em> </td><td><em>see below</em> </td><td>Compression options, see <a href="#compressionOptions">below</a>.</td></tr><tr><td><code>caching</code> </td><td><em>map</em> </td><td><em>see below</em> </td><td>Caching options, see <a href="#cachingOptions">below</a>.</td></tr></table><h4 id="compactionOptions">Compaction options</h4><p>The <code>compaction</code> property must at least define the <code>'class'</code> sub-option, that defines the compaction strategy class to use. The default supported class are <code>'SizeTieredCompactionStrategy'</code>, <code>'LeveledCompactionStrategy'</code> and <code>'DateTieredCompactionStrategy'</code>. Custom strategy can be provided by sp ecifying the full class name as a <a href="#constants">string constant</a>. The rest of the sub-options depends on the chosen class. The sub-options supported by the default classes are:</p><table><tr><th>option </th><th>supported compaction strategy </th><th>default </th><th>description </th></tr><tr><td><code>enabled</code> </td><td><em>all</em> </td><td>true </td><td>A boolean denoting whether compaction should be enabled or not.</td></tr><tr><td><code>tombstone_threshold</code> </td><td><em>all</em> </td><td>0.2 </td><td>A ratio such that if a sstable has more than this ratio of gcable tombstones over all contained columns, the sstable will be compacted (with no other sstables) for the purpose of purging those tombstones. 
</td></tr><tr><td><code>tombstone_compaction_interval</code> </td><td><em>all</em> </td><td>1 day </td><td>The minimum time to wait after an sstable creation time before considering it for “tombstone compaction”, where “tombstone compaction” is the compaction triggered if the sstable has more gcable tombstones than <code>tombstone_threshold</code>. </td></tr><tr><td><code>unchecked_tombstone_compaction</code> </td><td><em>all</em> </td><td>false </td><td>Setting this to true enables more aggressive tombstone compactions – single sstable tombstone compactions will run without checking how likely it is that they will be successful. </td></tr><tr><td><code>min_sstable_size</code> </td><td>SizeTieredCompactionStrategy </td><td>50MB </td><td>The size tiered strategy groups SSTables to compact in buckets. A bucket groups SSTables that differs from less than 50% in size. However, for small sizes, this would result in a bucketing that is too fine grained. <code>min_sstable_size</code> defines a siz e threshold (in bytes) below which all SSTables belong to one unique bucket</td></tr><tr><td><code>min_threshold</code> </td><td>SizeTieredCompactionStrategy </td><td>4 </td><td>Minimum number of SSTables needed to start a minor compaction.</td></tr><tr><td><code>max_threshold</code> </td><td>SizeTieredCompactionStrategy </td><td>32 </td><td>Maximum number of SSTables processed by one minor compaction.</td></tr><tr><td><code>bucket_low</code> </td><td>SizeTieredCompactionStrategy </td><td>0.5 </td><td>Size tiered consider sstables to be within the same bucket if their size is within [average_size * <code>bucket_low</code>, average_size * <code>bucket_high</code> ] (i.e the default groups sstable whose sizes diverges by at most 50%)</td></tr><tr><td><code>bucket_high</code> </td><td>SizeTieredCompactionStrategy </td><td>1.5 </td><td>Size tiered consider sstables to be within the same bucket if their size is within [average_size * <code>bucket_low</code>, average_size * <code>bucket_high</code> ] (i.e the default groups sstable whose sizes diverges by at most 50%).</td></tr><tr><td><code>sstable_size_in_mb</code> </td><td>LeveledCompactionStrategy </td><td>5MB </td><td>The target size (in MB) for sstables in the leveled strategy. Note that while sstable sizes should stay less or equal to <code>sstable_size_in_mb</code>, it is possible to exceptionally have a larger sstable as during compaction, data for a given partition key are never split into 2 sstables</td></tr><tr><td><code>timestamp_resolution</code> </td><td>DateTieredCompactionStrategy </td><td>MICROSECONDS </td><td>The timestamp resolution used when inserting data, could be MILLISECONDS, MICROSECONDS etc (should be understandable by Java TimeUnit)</td></tr><tr><td><code>base_time_seconds</code> </td><td>DateTieredCompactionStrate gy </td><td>60 </td><td>The base size of the time windows. </td></tr><tr><td><code>max_sstable_age_days</code> </td><td>DateTieredCompactionStrategy </td><td>365 </td><td>SSTables only containing data that is older than this will never be compacted. </td></tr></table><h4 id="compressionOptions">Compression options</h4><p>For the <code>compression</code> property, the following sub-options are available:</p><table><tr><th>option </th><th>default </th><th>description </th></tr><tr><td><code>sstable_compression</code> </td><td>LZ4Compressor </td><td>The compression algorithm to use. Default compressor are: LZ4Compressor, SnappyCompressor and DeflateCompressor. Use an empty string (<code>''</code>) to disable compression. 
Custom compressor can be provided by specifying the full class name as a <a href="#constants">string constant</a>.</td></tr><tr><td><code>chunk_length_kb</code> </td><td>64KB </td><td>On disk SST ables are compressed by block (to allow random reads). This defines the size (in KB) of said block. Bigger values may improve the compression rate, but increases the minimum size of data to be read from disk for a read </td></tr><tr><td><code>crc_check_chance</code> </td><td>1.0 </td><td>When compression is enabled, each compressed block includes a checksum of that block for the purpose of detecting disk bitrot and avoiding the propagation of corruption to other replica. This option defines the probability with which those checksums are checked during read. By default they are always checked. Set to 0 to disable checksum checking and to 0.5 for instance to check them every other read</td></tr></table><h4 id="cachingOptions">Caching options</h4><p>For the <code>caching</code> property, the following sub-options are available:</p><table><tr><th>option </th><th>default </th><th>description </th></tr><tr><td><code>keys</code> </td><td> ALL </td><td>Whether to cache keys (“key cache”) for this table. Valid values are: <code>ALL</code> and <code>NONE</code>.</td></tr><tr><td><code>rows_per_partition</code> </td><td>NONE </td><td>The amount of rows to cache per partition (“row cache”). If an integer <code>n</code> is specified, the first <code>n</code> queried rows of a partition will be cached. Other possible options are <code>ALL</code>, to cache all rows of a queried partition, or <code>NONE</code> to disable row caching.</td></tr></table><h4 id="Otherconsiderations">Other considerations:</h4><ul><li>When <a href="#insertStmt">inserting</a> / <a href="#updateStmt">updating</a> a given row, not all columns needs to be defined (except for those part of the key), and missing columns occupy no space on disk. Furthermore, adding new columns (see <a href=#alterStmt><tt>ALTER TABLE</tt></a>) is a constant time operation. There is thus no need to try to anticipate future usage (or to cry w hen you haven’t) when creating a table.</li></ul><h3 id="alterTableStmt">ALTER TABLE</h3><p><i>Syntax:</i></p><pre class="syntax"><pre><alter-table-stmt> ::= ALTER (TABLE | COLUMNFAMILY) <tablename> <instruction> +</pre></pre><p>the last query will return <code>'static1'</code> as value for <code>s</code>, since <code>s</code> is static and thus the 2nd insertion modified this “shared” value. Note however that static columns are only static within a given partition, and if in the example above both rows where from different partitions (i.e. if they had different value for <code>pk</code>), then the 2nd insertion would not have modified the value of <code>s</code> for the first row.</p><p>A few restrictions applies to when static columns are allowed:</p><ul><li>tables with the <code>COMPACT STORAGE</code> option (see below) cannot have them</li><li>a table without clustering columns cannot have static columns (in a table without clustering columns, every partition has only one row, and so every column is inherently static).</li><li>only non <code>PRIMARY KEY</code> columns can be static</li></ul><h4 id="createTableOptions"><code><option></code></h4><p>The <code>CREATE TABLE</cod e> statement supports a number of options that controls the configuration of a new table. These options can be specified after the <code>WITH</code> keyword.</p><p>The first of these option is <code>COMPACT STORAGE</code>. 
This option is mainly targeted towards backward compatibility for definitions created before CQL3 (see <a href="http://www.datastax.com/dev/blog/thrift-to-cql3">www.datastax.com/dev/blog/thrift-to-cql3</a> for more details). The option also provides a slightly more compact layout of data on disk but at the price of diminished flexibility and extensibility for the table. Most notably, <code>COMPACT STORAGE</code> tables cannot have collections nor static columns and a <code>COMPACT STORAGE</code> table with at least one clustering column supports exactly one (as in not 0 nor more than 1) column not part of the <code>PRIMARY KEY</code> definition (which imply in particular that you cannot add nor remove columns after creation). For those reasons, <code>COMPACT STO RAGE</code> is not recommended outside of the backward compatibility reason evoked above.</p><p>Another option is <code>CLUSTERING ORDER</code>. It allows to define the ordering of rows on disk. It takes the list of the clustering column names with, for each of them, the on-disk order (Ascending or descending). Note that this option affects <a href="#selectOrderBy">what <code>ORDER BY</code> are allowed during <code>SELECT</code></a>.</p><p>Table creation supports the following other <code><property></code>:</p><table><tr><th>option </th><th>kind </th><th>default </th><th>description</th></tr><tr><td><code>comment</code> </td><td><em>simple</em> </td><td>none </td><td>A free-form, human-readable comment.</td></tr><tr><td><code>read_repair_chance</code> </td><td><em>simple</em> </td><td>0.1 </td><td>The probability with which to query extra nodes (e.g. more nodes than required by the consistency level) for the purpos e of read repairs.</td></tr><tr><td><code>dclocal_read_repair_chance</code> </td><td><em>simple</em> </td><td>0 </td><td>The probability with which to query extra nodes (e.g. more nodes than required by the consistency level) belonging to the same data center than the read coordinator for the purpose of read repairs.</td></tr><tr><td><code>gc_grace_seconds</code> </td><td><em>simple</em> </td><td>864000 </td><td>Time to wait before garbage collecting tombstones (deletion markers).</td></tr><tr><td><code>bloom_filter_fp_chance</code> </td><td><em>simple</em> </td><td>0.00075 </td><td>The target probability of false positive of the sstable bloom filters. Said bloom filters will be sized to provide the provided probability (thus lowering this value impact the size of bloom filters in-memory and on-disk)</td></tr><tr><td><code>default_time_to_live</code> </td><td><em>simple</em> </td><td>0 </td><td>The default expiration time (“TTL& #8221;) in seconds for a table.</td></tr><tr><td><code>compaction</code> </td><td><em>map</em> </td><td><em>see below</em> </td><td>Compaction options, see <a href="#compactionOptions">below</a>.</td></tr><tr><td><code>compression</code> </td><td><em>map</em> </td><td><em>see below</em> </td><td>Compression options, see <a href="#compressionOptions">below</a>.</td></tr><tr><td><code>caching</code> </td><td><em>map</em> </td><td><em>see below</em> </td><td>Caching options, see <a href="#cachingOptions">below</a>.</td></tr></table><h4 id="compactionOptions">Compaction options</h4><p>The <code>compaction</code> property must at least define the <code>'class'</code> sub-option, that defines the compaction strategy class to use. The default supported class are <code>'SizeTieredCompactionStrategy'</code>, <code>'LeveledCompactionStrategy'</code> and <code>'DateTieredCompactionStrategy'</code>. 
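<p>As an illustration, switching a hypothetical <code>users</code> table to the leveled strategy could look like the sketch below; the 160 MB target size is an arbitrary example value, not a recommendation from this document:</p><pre class="sample"><pre>ALTER TABLE users
  WITH compaction = { 'class' : 'LeveledCompactionStrategy', 'sstable_size_in_mb' : 160 };
</pre></pre>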
Custom strategy can be provided by sp ecifying the full class name as a <a href="#constants">string constant</a>. The rest of the sub-options depends on the chosen class. The sub-options supported by the default classes are:</p><table><tr><th>option </th><th>supported compaction strategy </th><th>default </th><th>description </th></tr><tr><td><code>enabled</code> </td><td><em>all</em> </td><td>true </td><td>A boolean denoting whether compaction should be enabled or not.</td></tr><tr><td><code>tombstone_threshold</code> </td><td><em>all</em> </td><td>0.2 </td><td>A ratio such that if a sstable has more than this ratio of gcable tombstones over all contained columns, the sstable will be compacted (with no other sstables) for the purpose of purging those tombstones. </td></tr><tr><td><code>tombstone_compaction_interval</code> </td><td><em>all</em> </td><td>1 day </td><td>The minimum time to wait after an sstable creation time before considering it for “tombstone compaction”, where “tombstone compaction” is the compaction triggered if the sstable has more gcable tombstones than <code>tombstone_threshold</code>. </td></tr><tr><td><code>unchecked_tombstone_compaction</code> </td><td><em>all</em> </td><td>false </td><td>Setting this to true enables more aggressive tombstone compactions – single sstable tombstone compactions will run without checking how likely it is that they will be successful. </td></tr><tr><td><code>min_sstable_size</code> </td><td>SizeTieredCompactionStrategy </td><td>50MB </td><td>The size tiered strategy groups SSTables to compact in buckets. A bucket groups SSTables that differs from less than 50% in size. However, for small sizes, this would result in a bucketing that is too fine grained. <code>min_sstable_size</code> defines a siz e threshold (in bytes) below which all SSTables belong to one unique bucket</td></tr><tr><td><code>min_threshold</code> </td><td>SizeTieredCompactionStrategy </td><td>4 </td><td>Minimum number of SSTables needed to start a minor compaction.</td></tr><tr><td><code>max_threshold</code> </td><td>SizeTieredCompactionStrategy </td><td>32 </td><td>Maximum number of SSTables processed by one minor compaction.</td></tr><tr><td><code>bucket_low</code> </td><td>SizeTieredCompactionStrategy </td><td>0.5 </td><td>Size tiered consider sstables to be within the same bucket if their size is within [average_size * <code>bucket_low</code>, average_size * <code>bucket_high</code> ] (i.e the default groups sstable whose sizes diverges by at most 50%)</td></tr><tr><td><code>bucket_high</code> </td><td>SizeTieredCompactionStrategy </td><td>1.5 </td><td>Size tiered consider sstables to be within the same bucket if their size is within [average_size * <code>bucket_low</code>, average_size * <code>bucket_high</code> ] (i.e the default groups sstable whose sizes diverges by at most 50%).</td></tr><tr><td><code>sstable_size_in_mb</code> </td><td>LeveledCompactionStrategy </td><td>5MB </td><td>The target size (in MB) for sstables in the leveled strategy. 
Note that while sstable sizes should stay less or equal to <code>sstable_size_in_mb</code>, it is possible to exceptionally have a larger sstable as during compaction, data for a given partition key are never split into 2 sstables</td></tr><tr><td><code>timestamp_resolution</code> </td><td>DateTieredCompactionStrategy </td><td>MICROSECONDS </td><td>The timestamp resolution used when inserting data, could be MILLISECONDS, MICROSECONDS etc (should be understandable by Java TimeUnit) - don’t change this unless you do mutations with USING TIMESTAMP <non_microsecond_timestam ps> (or equivalent directly in the client)</td></tr><tr><td><code>base_time_seconds</code> </td><td>DateTieredCompactionStrategy </td><td>60 </td><td>The base size of the time windows. </td></tr><tr><td><code>max_sstable_age_days</code> </td><td>DateTieredCompactionStrategy </td><td>365 </td><td>SSTables only containing data that is older than this will never be compacted. </td></tr></table><h4 id="compressionOptions">Compression options</h4><p>For the <code>compression</code> property, the following sub-options are available:</p><table><tr><th>option </th><th>default </th><th>description </th></tr><tr><td><code>sstable_compression</code> </td><td>LZ4Compressor </td><td>The compression algorithm to use. Default compressor are: LZ4Compressor, SnappyCompressor and DeflateCompressor. Use an empty string (<code>''</code>) to disable compression. Custom compressor can be provided by specifying the full class name as a <a href="#constants">string constant</a>.</td></tr><tr><td><code>chunk_length_kb</code> </td><td>64KB </td><td>On disk SSTables are compressed by block (to allow random reads). This defines the size (in KB) of said block. Bigger values may improve the compression rate, but increases the minimum size of data to be read from disk for a read </td></tr><tr><td><code>crc_check_chance</code> </td><td>1.0 </td><td>When compression is enabled, each compressed block includes a checksum of that block for the purpose of detecting disk bitrot and avoiding the propagation of corruption to other replica. This option defines the probability with which those checksums are checked during read. By default they are always checked. Set to 0 to disable checksum checking and to 0.5 for instance to check them every other read</td></tr></table><h4 id="cachingOptions">Caching options</h4><p>For the <code>caching</code> property, the following sub-options are available:</p><t able><tr><th>option </th><th>default </th><th>description </th></tr><tr><td><code>keys</code> </td><td>ALL </td><td>Whether to cache keys (“key cache”) for this table. Valid values are: <code>ALL</code> and <code>NONE</code>.</td></tr><tr><td><code>rows_per_partition</code> </td><td>NONE </td><td>The amount of rows to cache per partition (“row cache”). If an integer <code>n</code> is specified, the first <code>n</code> queried rows of a partition will be cached. Other possible options are <code>ALL</code>, to cache all rows of a queried partition, or <code>NONE</code> to disable row caching.</td></tr></table><h4 id="Otherconsiderations">Other considerations:</h4><ul><li>When <a href="#insertStmt">inserting</a> / <a href="#updateStmt">updating</a> a given row, not all columns needs to be defined (except for those part of the key), and missing columns occupy no space on disk. Furthermore, adding new columns (see <a h ref=#alterStmt><tt>ALTER TABLE</tt></a>) is a constant time operation. 
There is thus no need to try to anticipate future usage (or to cry when you haven’t) when creating a table.</li></ul><h3 id="alterTableStmt">ALTER TABLE</h3><p><i>Syntax:</i></p><pre class="syntax"><pre><alter-table-stmt> ::= ALTER (TABLE | COLUMNFAMILY) <tablename> <instruction> <instruction> ::= ALTER <identifier> TYPE <type> | ADD <identifier> <type> @@ -259,14 +259,18 @@ INSERT INTO NerdMovies JSON '{"movie": " | <identifier> '=' <identifier> '+' <map-literal> | <identifier> '[' <term> ']' '=' <term> -<condition> ::= <identifier> '=' <term> - | <identifier> '[' <term> ']' '=' <term> +<condition> ::= <identifier> <op> <term> + | <identifier> IN (<variable> | '(' ( <term> ( ',' <term> )* )? ')') + | <identifier> '[' <term> ']' <op> <term> + | <identifier> '[' <term> ']' IN <term> + +<op> ::= '<' | '<=' | '=' | '!=' | '>=' | '>' <where-clause> ::= <relation> ( AND <relation> )* <relation> ::= <identifier> '=' <term> | <identifier> IN '(' ( <term> ( ',' <term> )* )? ')' - | <identifier> IN '?' + | <identifier> IN <variable> <option> ::= TIMESTAMP <integer> | TTL <integer> @@ -277,7 +281,7 @@ SET director = 'Joss Whedon', WHERE movie = 'Serenity'; UPDATE UserActions SET total = total + 2 WHERE user = B70DE1D0-9908-4AE3-BE34-5573E5B09F14 AND action = 'click'; -</pre></pre><p><br/>The <code>UPDATE</code> statement writes one or more columns for a given row in a table. The <code><where-clause></code> is used to select the row to update and must include all columns composing the <code>PRIMARY KEY</code> (the <code>IN</code> relation is only supported for the last column of the partition key). Other columns values are specified through <code><assignment></code> after the <code>SET</code> keyword.</p><p>Note that unlike in SQL, <code>UPDATE</code> does not check the prior existence of the row by default: the row is created if none existed before, and updated otherwise. Furthermore, there is no mean to know which of creation or update happened.</p><p>It is however possible to use the conditions on some columns through <code>IF</code>, in which case the row will not be updated unless such condition are met. But please note that using <code>IF</code> conditions will incur a non negligible performance cost (internally, Paxos will be used) so this should be used sparingly.</p><p>In an <code>UPDATE</code> statement, all updates within the same partition key are applied atomically and in isolation.</p><p>The <code>c = c + 3</code> form of <code><assignment></code> is used to increment/decrement counters. The identifier after the ‘=’ sign <strong>must</strong> be the same than the one before the ‘=’ sign (Only increment/decrement is supported on counters, not the assignment of a specific value).</p><p>The <code>id = id + <collection-literal></code> and <code>id[value1] = value2</code> forms of <code><assignment></code> are for collections. Please refer to the <a href="#collections">relevant section</a> for more details.</p><h4 id="updateOptions"><code><options></code></h4><p>The <code>UPDATE</code> and <code>INSERT</code> statements allows to specify the following options for the insertion:</p><ul><li><code>TIMESTAMP</code>: sets the timestamp for the operation. If not specified, the coo rdinator will use the current time (in microseconds) at the start of statement execution as the timestamp. This is usually a suitable default.</li><li><code>TTL</code>: allows to specify an optional Time To Live (in seconds) for the inserted values. 
If set, the inserted values are automatically removed from the database after the specified time. Note that the TTL concerns the inserted values, not the column themselves. This means that any subsequent update of the column will also reset the TTL (to whatever TTL is specified in that update). By default, values never expire. A TTL of 0 or a negative one is equivalent to no TTL.</li></ul><h3 id="deleteStmt">DELETE</h3><p><i>Syntax:</i></p><pre class="syntax"><pre><delete-stmt> ::= DELETE ( <selection> ( ',' <selection> )* )? +</pre></pre><p><br/>The <code>UPDATE</code> statement writes one or more columns for a given row in a table. The <code><where-clause></code> is used to select the row to update and must include all columns composing the <code>PRIMARY KEY</code> (the <code>IN</code> relation is only supported for the last column of the partition key). Other columns values are specified through <code><assignment></code> after the <code>SET</code> keyword.</p><p>Note that unlike in SQL, <code>UPDATE</code> does not check the prior existence of the row by default (except through the use of <code><condition></code>, see below): the row is created if none existed before, and updated otherwise. Furthermore, there is no mean to know which of creation or update happened.</p><p>It is however possible to use the conditions on some columns through <code>IF</code>, in which case the row will not be updated unless such condition are met. But please note that using <code>IF</code> conditions will incur a non negligible performance cost (internally, Paxos will be used) so this should be used sparingly.</p><p>In an <code>UPDATE</code> statement, all updates within the same partition key are applied atomically and in isolation.</p><p>The <code>c = c + 3</code> form of <code><assignment></code> is used to increment/decrement counters. The identifier after the ‘=’ sign <strong>must</strong> be the same than the one before the ‘=’ sign (Only increment/decrement is supported on counters, not the assignment of a specific value).</p><p>The <code>id = id + <collection-literal></code> and <code>id[value1] = value2</code> forms of <code><assignment></code> are for collections. Please refer to the <a href="#collections">relevant section</a> for more details.</p><h4 id="updateOptions"><code><options></code></h4><p>The <code>UPDATE</code> and <code>INSERT</code> statements allows to specify the following options for the insertion:</p><ul><li><code>TIMESTAMP</cod e>: sets the timestamp for the operation. If not specified, the coordinator will use the current time (in microseconds) at the start of statement execution as the timestamp. This is usually a suitable default.</li><li><code>TTL</code>: allows to specify an optional Time To Live (in seconds) for the inserted values. If set, the inserted values are automatically removed from the database after the specified time. Note that the TTL concerns the inserted values, not the column themselves. This means that any subsequent update of the column will also reset the TTL (to whatever TTL is specified in that update). By default, values never expire. A TTL of 0 or a negative one is equivalent to no TTL.</li></ul><h3 id="deleteStmt">DELETE</h3><p><i>Syntax:</i></p><pre class="syntax"><pre><delete-stmt> ::= DELETE ( <selection> ( ',' <selection> )* )? FROM <tablename> ( USING TIMESTAMP <integer>)? WHERE <where-clause> @@ -289,10 +293,14 @@ UPDATE UserActions SET total = total + 2 <relation> ::= <identifier> '=' <term> | <identifier> IN '(' ( <term> ( ',' <term> )* )? 
')' - | <identifier> IN '?' + | <identifier> IN <variable> + +<condition> ::= <identifier> <op> <term> + | <identifier> IN (<variable> | '(' ( <term> ( ',' <term> )* )? ')') + | <identifier> '[' <term> ']' <op> <term> + | <identifier> '[' <term> ']' IN <term> -<condition> ::= <identifier> '=' <term> - | <identifier> '[' <term> ']' '=' <term> +<op> ::= '<' | '<=' | '=' | '!=' | '>=' | '>' </pre></pre><p><br/><i>Sample:</i></p><pre class="sample"><pre>DELETE FROM NerdMovies USING TIMESTAMP 1240003134 WHERE movie = 'Serenity'; DELETE phone FROM Users WHERE userid IN (C73DE1D3-AF08-40F3-B124-3FF3E5109F22, B70DE1D0-9908-4AE3-BE34-5573E5B09F14); @@ -573,7 +581,7 @@ UPDATE plays SET scores = scores - [ 12, username text, ... ) -</pre></pre><p>then the <code>token</code> function will take a single argument of type <code>text</code> (in that case, the partition key is <code>userid</code> (there is no clustering columns so the partition key is the same than the primary key)), and the return type will be <code>bigint</code>.</p><h3 id="uuidFun">Uuid</h3><p>The <code>uuid</code> function takes no parameters and generates a random type 4 uuid suitable for use in INSERT or SET statements.</p><h3 id="timeuuidFun">Timeuuid functions</h3><h4 id="now"><code>now</code></h4><p>The <code>now</code> function takes no arguments and generates a new unique timeuuid (at the time where the statement using it is executed). Note that this method is useful for insertion but is largely non-sensical in <code>WHERE</code> clauses. For instance, a query of the form</p><pre class="sample"><pre>SELECT * FROM myTable WHERE t = now() +</pre></pre><p>then the <code>token</code> function will take a single argument of type <code>text</code> (in that case, the partition key is <code>userid</code> (there is no clustering columns so the partition key is the same than the primary key)), and the return type will be <code>bigint</code>.</p><h3 id="uuidFun">Uuid</h3><p>The <code>uuid</code> function takes no parameters and generates a random type 4 uuid suitable for use in INSERT or SET statements.</p><h3 id="timeuuidFun">Timeuuid functions</h3><h4 id="now"><code>now</code></h4><p>The <code>now</code> function takes no arguments and generates, on the coordinator node, a new unique timeuuid (at the time where the statement using it is executed). Note that this method is useful for insertion but is largely non-sensical in <code>WHERE</code> clauses. For instance, a query of the form</p><pre class="sample"><pre>SELECT * FROM myTable WHERE t = now() </pre></pre><p>will never return any result by design, since the value returned by <code>now()</code> is guaranteed to be unique.</p><h4 id="minTimeuuidandmaxTimeuuid"><code>minTimeuuid</code> and <code>maxTimeuuid</code></h4><p>The <code>minTimeuuid</code> (resp. <code>maxTimeuuid</code>) function takes a <code>timestamp</code> value <code>t</code> (which can be <a href="#usingtimestamps">either a timestamp or a date string</a> ) and return a <em>fake</em> <code>timeuuid</code> corresponding to the <em>smallest</em> (resp. <em>biggest</em>) possible <code>timeuuid</code> having for timestamp <code>t</code>. So for instance:</p><pre class="sample"><pre>SELECT * FROM myTable WHERE t > maxTimeuuid('2013-01-01 00:05+0000') AND t < minTimeuuid('2013-02-02 10:00+0000') </pre></pre><p>will select all rows where the <code>timeuuid</code> column <code>t</code> is strictly older than ‘2013-01-01 00:05+0000’ but strictly younger than ‘2013-02-02 10:00+0000’. 
Please note that <code>t >= maxTimeuuid('2013-01-01 00:05+0000')</code> would still <em>not</em> select a <code>timeuuid</code> generated exactly at ‘2013-01-01 00:05+0000’ and is essentially equivalent to <code>t > maxTimeuuid('2013-01-01 00:05+0000')</code>.</p><p><em>Warning</em>: We called the values generated by <code>minTimeuuid</code> and <code>maxTimeuuid</code> <em>fake</em> UUID because they do not respect the Time-Based UUID generation process specified by <a href="http://www.ietf.org/rfc/rfc4122.txt">RFC 4122</a>. In particular, the values returned by these 2 methods will not be unique. This means you should only use those methods for querying (as in the example above). Inserting the result of those methods is almost certainly <em>a bad idea</em>.</p><h3 id="timeFun">Time conversion functions</h3><p>A number of functions are provided to “convert” a <code>timeuuid</code>, a <code>timestamp</code> or a <code>date</code> into another <code>native</code> type.</p><table><tr><th>function name </th><th>input type </th><th>description</th></tr><tr><td><code>toDate</code> </td><td><code>timeuuid</code> </td><td>Converts the <code>timeuuid</code> argument into a <code>date</code> type</td></tr><tr><td><code>toDate</code> </td><td><code>timestamp</code> </td><td>Converts the <code>timestamp</code> argument into a <code>date</code> type</td></tr><tr><td><code>toTimestamp</code> </td><td><code>timeuuid</code> </td><td>Converts the <code>timeuuid</code> argument into a <code>timestamp</code> type</td></tr><tr><td><code>toTimestamp</code> </td><td><code>date</code> </td><td>Converts the <code>date</code> argument into a <code>timestamp</code> type</td></tr><tr><td><code>toUnixTimestamp</code> </td><td><code>timeuuid</code> </td><td>Converts the <code>timeuuid</code> argument into a <code>bigInt</code> raw value</td></tr><tr><td><code>toUnixTimestamp</code> </td><td><code>timestamp</code> </td><td>Converts the <code>timestamp</code> argument into a <code>bigInt</code> raw value</td></tr><tr><td><code>toUnixTimestamp</code> </td><td><code>date</code> </td><td>Converts the <code>date</code> argument into a <code>bigInt</code> raw value</td></tr><tr><td><code>dateOf</code> </td><td><code>timeuuid</code> </td><td>Similar to <code>toTimestamp(timeuuid)</code> (DEPRECATED)</td></tr><tr><td><code>unixTimestampOf</code> </td><td><code>timeuuid</code> </td><td>Similar to <code>toUnixTimestamp(timeuuid)</code> (DEPRECATED)</td></tr></table><h3 id="blobFun">Blob conversion functions</h3><p>A number of functions are provided to “convert” the native types into binary data (<code>blob</code>). For every <code><native-type></code> <code>type</code> supported by CQL3 (a notable exception is <code>blob</code>, for obvious reasons), the function <code>typeAsBlob</code> takes an argument of type <code>type</code> and returns it as a <code>blob</code>. Conversely, the function <code>blobAsType</code> takes a <code>blob</code> argument and converts it into a value of type <code>type</code>. So, for instance, <code>bigintAsBlob(3)</code> is <code>0x0000000000000003</code> and <code>blobAsBigint(0x0000000000000003)</code> is <code>3</code>.</p><h2 id="aggregates">Aggregates</h2><p>Aggregate functions work on a set of rows.
They receive values for each row and return one value for the whole set.<br/>If <code>normal</code> columns, <code>scalar functions</code>, <code>UDT</code> fields, <code>writetime</code> or <code>ttl</code> are selected together with aggregate functions, the values returned for them will be the ones of the first row matching the query.</p><p>CQL3 distinguishes between built-in aggregates (so-called ‘native aggregates’) and <a href="#udas">user-defined aggregates</a>. CQL3 includes several native aggregates, described below:</p><h3 id="countFct">Count</h3><p>The <code>count</code> function can be used to count the rows returned by a query. Example:</p><pre class="sample"><pre>SELECT COUNT(*) FROM plays; SELECT COUNT(1) FROM plays;
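-- The count can also be given a more readable name with a select-clause alias
-- (the alias name below is arbitrary):
SELECT COUNT(*) AS how_many FROM plays;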