Phoenix

https://github.com/forcedotcom/phoenix

 

http://www.vogella.com/articles/MySQLJava/article.html 

 

HBase shell:

create 't1', {NAME => 'f1', VERSIONS => 5}

put 't1', "row1", 'f1:1', "aaa"

scan 't1'

 

Phoenix:

 

!tables

 

CREATE TABLE IF NOT EXISTS "t1" (
     row VARCHAR NOT NULL,
     "f1"."1" VARCHAR
     CONSTRAINT PK PRIMARY KEY (row)
);

select * from "t1";

drop table "t1"; -- the data in t1 is deleted, but the table itself still exists.
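Before the drop, rows can also be written through Phoenix itself rather than through the HBase shell put. A minimal JDBC sketch against the same "t1" mapping (assuming the jdbc:phoenix:node35:2224 URL used in the examples below):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;

public class UpsertT1 {

  public static void main(String[] args) throws Exception {
    // Register the Phoenix JDBC driver
    Class.forName("com.salesforce.phoenix.jdbc.PhoenixDriver");
    Connection conn = DriverManager.getConnection("jdbc:phoenix:node35:2224");

    // Phoenix uses UPSERT for both insert and update;
    // values follow the column order of the DDL above (ROW, then "f1"."1")
    PreparedStatement ps = conn.prepareStatement("UPSERT INTO \"t1\" VALUES (?, ?)");
    ps.setString(1, "row2");
    ps.setString(2, "bbb");
    ps.executeUpdate();
    conn.commit(); // Phoenix connections do not auto-commit by default

    ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM \"t1\"");
    while (rs.next()) {
      System.out.println(rs.getString(1) + " -> " + rs.getString(2));
    }
    conn.close();
  }
}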

 

 ./sqlline.sh node35:2224 ../examples/stock_symbol.sql

./psql.sh node35:2224 ../examples/web_stat.sql ../examples/web_stat.csv ../examples/web_stat_queries.sql

./performance.sh node35:2224 1000000

 

Inside performance.sh:

# Create Table DDL

createtable="CREATE TABLE IF NOT EXISTS $table (HOST CHAR(2) NOT NULL,DOMAIN VARCHAR NOT NULL,

FEATURE VARCHAR NOT NULL,DATE DATE NOT NULL,USAGE.CORE BIGINT,USAGE.DB BIGINT,STATS.ACTIVE_VISITOR 

INTEGER CONSTRAINT PK PRIMARY KEY (HOST, DOMAIN, FEATURE, DATE)) 

SPLIT ON ('CSGoogle','CSSalesforce','EUApple','EUGoogle','EUSalesforce','NAApple','NAGoogle','NASalesforce');"
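The USAGE.* and STATS.* columns above land in separate HBase column families, and SPLIT ON pre-creates regions at the listed HOST+DOMAIN row key prefixes. The same kind of DDL can be issued over JDBC instead of through psql.sh; a rough sketch (PERFORMANCE_TEST and its split points are placeholders, the URL is the node35:2224 quorum used above):

import java.sql.Connection;
import java.sql.DriverManager;

public class CreateSplitTable {

  public static void main(String[] args) throws Exception {
    Class.forName("com.salesforce.phoenix.jdbc.PhoenixDriver");
    Connection conn = DriverManager.getConnection("jdbc:phoenix:node35:2224");

    // Same shape as the performance.sh DDL: two extra column families (USAGE, STATS)
    // and pre-split regions on HOST+DOMAIN prefixes
    String ddl = "CREATE TABLE IF NOT EXISTS PERFORMANCE_TEST ("
        + "HOST CHAR(2) NOT NULL, DOMAIN VARCHAR NOT NULL, "
        + "FEATURE VARCHAR NOT NULL, DATE DATE NOT NULL, "
        + "USAGE.CORE BIGINT, USAGE.DB BIGINT, STATS.ACTIVE_VISITOR INTEGER "
        + "CONSTRAINT PK PRIMARY KEY (HOST, DOMAIN, FEATURE, DATE)) "
        + "SPLIT ON ('CSGoogle', 'EUApple', 'NASalesforce')";
    conn.createStatement().execute(ddl);
    conn.close();
  }
}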

 


0: jdbc:phoenix:node35:2224> select count(*) from PERFORMANCE_1000000;
+----------+
| COUNT(1) |
+----------+
| 1000000  |
+----------+

0: jdbc:phoenix:node35:2224> select * from PERFORMANCE_1000000 limit 2;
+------+------------+------------+---------------------+----------+----------+----------------+
| HOST |   DOMAIN   |  FEATURE   |        DATE         |   CORE   |    DB    | ACTIVE_VISITOR |
+------+------------+------------+---------------------+----------+----------+----------------+
| CS   | Apple.com  | Dashboard  | 2013-10-22          | 425      | 1906     | 4744           |
| CS   | Apple.com  | Dashboard  | 2013-10-22          | 471      | 875      | 9533           |
+------+------------+------------+---------------------+----------+----------+----------------+

0: jdbc:phoenix:node35:2224> explain select count(*) from PERFORMANCE_1000000;
+------------+
|    PLAN    |
+------------+
| CLIENT PARALLEL 27-WAY FULL SCAN OVER PERFORMANCE_1000000 |
|     SERVER AGGREGATE INTO SINGLE ROW |
+------------+

0: jdbc:phoenix:node35:2224> explain select * from PERFORMANCE_1000000 limit 2;
+------------+
|    PLAN    |
+------------+
| CLIENT PARALLEL 27-WAY FULL SCAN OVER PERFORMANCE_1000000 |
|     SERVER FILTER BY PageFilter 2 |
| CLIENT 2 ROW LIMIT |
+------------+

 

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;


public class UsePhoenix {

  public static void main(String[] args) {

    try {
      // Register the Phoenix JDBC driver
      Class.forName("com.salesforce.phoenix.jdbc.PhoenixDriver");
      // JDBC URL: jdbc:phoenix:<zookeeper quorum>:<zookeeper port>
      String DBConnectionString = "jdbc:phoenix:10.232.98.35:2224";
      Connection connection = DriverManager.getConnection(DBConnectionString);

      Statement statement = connection.createStatement();
      ResultSet rs = statement
          .executeQuery("select count(*) from PERFORMANCE_1000000");
      rs.next();
      System.out.println("Count " + rs.getLong(1));

      rs.close();
      statement.close();
      connection.close();
    } catch (Exception e) {
      e.printStackTrace();
    }

  }

}
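The EXPLAIN plans shown earlier can be fetched over the same JDBC connection: EXPLAIN simply returns the plan as a result set, one step per row. A minimal sketch reusing the connection string from UsePhoenix:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;

public class ExplainQuery {

  public static void main(String[] args) throws Exception {
    Class.forName("com.salesforce.phoenix.jdbc.PhoenixDriver");
    Connection conn = DriverManager.getConnection("jdbc:phoenix:10.232.98.35:2224");

    // Each row of the result set is one line of the plan (as in the sqlline output above)
    ResultSet rs = conn.createStatement().executeQuery(
        "EXPLAIN SELECT * FROM PERFORMANCE_1000000 LIMIT 2");
    while (rs.next()) {
      System.out.println(rs.getString(1));
    }
    conn.close();
  }
}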

 

 

./psql.sh node35:2224 count.sql

./sqlline.sh node35:2224 count.sql

 

 http://www.slideshare.net/Hadoop_Summit/taylor-june27-230pmhall1v2?from_search=6

 

 

Phoenix stores all of its metadata in an HBase table (SYSTEM.TABLE). When creating a Phoenix table, it can be mapped directly onto an existing HBase table, so the existing data does not need to be migrated.

See "Mapping to an Existing HBase Table" at https://github.com/forcedotcom/phoenix/wiki

 

First create an HBase table with the following commands:

create 't2', {NAME => 'f1', VERSIONS => 5}

put 't2', "row1", 'f1:q', "aaa"
put 't2', "row2", 'f1:q', "bbb"
put 't2', "row3", 'f1:q', "ccc"

 

Then create a matching table in Phoenix:
./sqlline.sh localhost

CREATE TABLE IF NOT EXISTS "t2" (
     row VARCHAR NOT NULL,
     "f1"."q" VARCHAR
     CONSTRAINT PK PRIMARY KEY (row)
);

t2, f1 and q need to be wrapped in double quotes, mainly because of case sensitivity; see the Phoenix wiki: https://github.com/forcedotcom/phoenix/wiki
Note that here Phoenix modifies the table's descriptor to add its coprocessors, so it first disables the table, then modifies it, and finally re-enables it.
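The descriptor change can also be checked from the HBase client API rather than the shell; a rough sketch using the 0.94-era HBaseAdmin API (the quorum setting is an assumption matching ./sqlline.sh localhost above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

public class ShowT2Descriptor {

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "localhost"); // assumption: same cluster as ./sqlline.sh localhost

    HBaseAdmin admin = new HBaseAdmin(conf);
    // The printed descriptor includes the coprocessor attributes Phoenix added during CREATE TABLE
    HTableDescriptor desc = admin.getTableDescriptor(Bytes.toBytes("t2"));
    System.out.println(desc);
    admin.close();
  }
}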

0: jdbc:phoenix:localhost> CREATE TABLE IF NOT EXISTS "t2" (
. . . . . . . . . . . . .>      row VARCHAR NOT NULL,
. . . . . . . . . . . . .>      "f1"."q" VARCHAR
. . . . . . . . . . . . .>      CONSTRAINT PK PRIMARY KEY (row)
. . . . . . . . . . . . .> );


0: jdbc:phoenix:localhost> select * from "t2";
+------------+------------+
|    ROW     |     q      |
+------------+------------+
| row1       | aaa        |
| row2       | bbb        |
| row3       | ccc        |
+------------+------------+
0: jdbc:phoenix:localhost> SELECT COUNT(1) FROM "t2";
+----------+
| COUNT(1) |
+----------+
| 3        |
+----------+

The HBase shell describe output now shows the coprocessors Phoenix attached to "t2" and to its metadata table SYSTEM.TABLE:

't2', {METHOD => 'table_att', coprocessor$1 => '|com.salesforce.phoenix.coprocessor.ScanRegionObserver|1|', coprocessor$2 => '|com.salesforce.phoenix.coprocessor.UngroupedAggregateRegionObserver|1|', coprocessor$3 => '|com.salesforce.phoenix.coprocessor.GroupedAggregateRegionObserver|1|', coprocessor$4 => '|com.salesforce.phoenix.join.HashJoiningRegionObserver|1|'}, {NAME => 'f1', VERSIONS => '5', KEEP_DELETED_CELLS => 'true'}

 

'SYSTEM.TABLE', {METHOD => 'table_att', coprocessor$1 => '|com.salesforce.phoenix.coprocessor.ScanRegionObserver|1|', coprocessor$2 => '|com.salesforce.phoenix.coprocessor.UngroupedAggregateRegionObserver|1|', coprocessor$3 => '|com.salesforce.phoenix.coprocessor.GroupedAggregateRegionObserver|1|', coprocessor$4 => '|com.salesforce.phoenix.join.HashJoiningRegionObserver|1|', coprocessor$5 => '|com.salesforce.phoenix.coprocessor.MetaDataEndpointImpl|1|', coprocessor$6 => '|com.salesforce.phoenix.coprocessor.MetaDataRegionObserver|2|', CONFIG => {'SPLIT_POLICY' => 'com.salesforce.phoenix.schema.MetaDataSplitPolicy', 'UpgradeTo20' => 'true'}}, {NAME => '_0', DATA_BLOCK_ENCODING => 'FAST_DIFF', VERSIONS => '1000', KEEP_DELETED_CELLS => 'true'}
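Because the catalog lives in SYSTEM.TABLE, the tables Phoenix knows about can also be listed through the standard JDBC metadata API (essentially what sqlline's !tables prints); a minimal sketch:

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;
import java.sql.ResultSet;

public class ListPhoenixTables {

  public static void main(String[] args) throws Exception {
    Class.forName("com.salesforce.phoenix.jdbc.PhoenixDriver");
    Connection conn = DriverManager.getConnection("jdbc:phoenix:node35:2224");

    // Phoenix answers the standard getTables() call from its SYSTEM.TABLE catalog
    DatabaseMetaData md = conn.getMetaData();
    ResultSet rs = md.getTables(null, null, null, null);
    while (rs.next()) {
      String schema = rs.getString("TABLE_SCHEM");
      System.out.println((schema == null ? "" : schema + ".")
          + rs.getString("TABLE_NAME") + " (" + rs.getString("TABLE_TYPE") + ")");
    }
    conn.close();
  }
}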

 
