In short: JDBC is a standard, vendor-neutral programming API for database access that works across JVM languages and across database products, and every database vendor supplies a JDBC driver for its own product according to that standard. Above all, it is a mature technology that is widely used by programmers. Since our goal is to build a standard set of integration APIs for a big-data system, adopting JDBC as the system's data interface lets us draw on the industry's rich pool of technical resources, cover the programming needs of more kinds of users, and lower the barrier to entry. For Scala, ScalikeJDBC is the library best suited to the development requirements below, because it exposes the core JDBC functionality in the simplest and most convenient way.
JDBC's basic components and working principles are fairly simple: the core components are just two objects, Connection and Statement (PreparedStatement). A Connection represents the connection to the database and provides the runtime environment for data processing; a Statement is created from a Connection and holds an executable SQL statement together with the methods for running it. Below we use examples from the ScalikeJDBC website to demonstrate how ScalikeJDBC is used:
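For reference, here is a minimal sketch of that raw JDBC workflow in Scala, using the same H2 in-memory database as the examples below (the query itself is just a placeholder):

import java.sql.{Connection, DriverManager, PreparedStatement, ResultSet}

object RawJdbcSketch extends App {
  Class.forName("org.h2.Driver")                                        // register the driver
  val conn: Connection =
    DriverManager.getConnection("jdbc:h2:mem:hello", "user", "pass")    // the Connection object
  try {
    val stmt: PreparedStatement =
      conn.prepareStatement("select 1 as answer")                       // Statement created from the Connection
    val rs: ResultSet = stmt.executeQuery()                             // run the SQL
    while (rs.next()) println(rs.getInt("answer"))
    rs.close(); stmt.close()
  } finally conn.close()                                                // always release the connection
}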
import scalikejdbc._

object JDBCIntro extends App {
  // standard java procedure to register the driver
  Class.forName("org.h2.Driver")

  // the default connectionPool is named 'default:
  // ConnectionPool.singleton("jdbc:h2:mem:hello", "user", "pass")
  // or: ConnectionPool.add('default, "jdbc:h2:mem:hello", "user", "pass")

  // this db name is 'h2mem
  ConnectionPool.add('h2mem, "jdbc:h2:mem:hello", "user", "pass")
First, register the JDBC driver and prepare the connection pool. Note that ScalikeJDBC provides a default connection pool with the fixed name 'default, so ConnectionPool.singleton(...) is equivalent to ConnectionPool.add('default, ...). Operations on the 'default pool need no name: DB == NamedDB('default), AutoSession == NamedAutoSession('default), and so on. The code above sets up the connection pool by hand, so replacing or adding a database, or adjusting its parameters, requires changing the code. A practical application should configure its databases through a configuration file, which makes it easy to adjust database parameters at will; we will cover configuration files in the next chapter.
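As a small sketch of this equivalence (reusing the same H2 in-memory URL and the 'h2mem name as above; both queries just select a constant to contrast the two access styles):

import scalikejdbc._

// the default pool: accessed without a name
ConnectionPool.singleton("jdbc:h2:mem:hello", "user", "pass")
val one: Option[Int] = DB readOnly { implicit session =>          // same as NamedDB('default) readOnly {...}
  sql"select 1".map(_.int(1)).single.apply()
}

// a named pool: every access carries the pool name
ConnectionPool.add('h2mem, "jdbc:h2:mem:hello", "user", "pass")
val alsoOne: Option[Int] = NamedDB('h2mem) readOnly { implicit session =>
  sql"select 1".map(_.int(1)).single.apply()
}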
Next comes the construction and execution of a SQL statement:
  // construct SQL object
  val createSQL: SQL[Nothing, NoExtractor] = SQL("""
    create table members (
      id bigint primary key auto_increment,
      name varchar(30) not null,
      description varchar(1000),
      birthday date,
      created_at timestamp not null
    )""")

  // implicit val session = AutoSession   // the 'default pool
  // run this SQL
  createSQL.execute().apply()(NamedAutoSession('h2mem))   // autoCommit
SQL("...") constructs an instance of type SQL[A, E]; methods such as execute() select the execution mode, and apply()(implicit s: DBSession) actually runs the SQL. A DBSession is essentially a wrapper around a JDBC connection. The relevant definition of SQL[A, E] looks like this:
/**
 * SQL abstraction.
 *
 * @param statement SQL template
 * @param rawParameters parameters
 * @param f extractor function
 * @tparam A return type
 */
abstract class SQL[A, E <: WithExtractor](
  val statement: String,
  private[scalikejdbc] val rawParameters: Seq[Any]
)(f: WrappedResultSet => A) extends Extractor[A] {...}

/**
 * Extractor
 */
private[scalikejdbc] trait Extractor[A] {
  def extractor: (WrappedResultSet) => A
}
SQL("...") calls the apply construction method of the SQL companion object:
object SQL {
  ...
  def apply[A](sql: String): SQL[A, NoExtractor] =
    new SQLToTraversableImpl[A, NoExtractor](sql, Seq.empty)(noExtractor[A](
      ErrorMessage.THIS_IS_A_BUG
    ))
}

/**
 * SQL which execute java.sql.Statement#executeQuery() and returns the result as scala.collection.Traversable value.
 *
 * @param statement SQL template
 * @param rawParameters parameters
 * @param extractor extractor function
 * @tparam A return type
 */
class SQLToTraversableImpl[A, E <: WithExtractor](
  override val statement: String,
  override val rawParameters: Seq[Any]
)(
  override val extractor: WrappedResultSet => A
) extends SQL[A, E](statement, rawParameters)(extractor)
  with SQLToTraversable[A, E] {

  override protected def withParameters(params: Seq[Any]): SQLToResult[A, E, Traversable] = {
    new SQLToTraversableImpl[A, E](statement, params)(extractor)
  }

  override protected def withStatementAndParameters(state: String, params: Seq[Any]): SQLToResult[A, E, Traversable] = {
    new SQLToTraversableImpl[A, E](state, params)(extractor)
  }

  override protected def withExtractor[B](f: WrappedResultSet => B): SQLToResult[B, HasExtractor, Traversable] = {
    new SQLToTraversableImpl[B, HasExtractor](statement, rawParameters)(f)
  }
}
SQLToTraversableImpl provides the methods that combine the SQL statement with its parameters and builds a SQLToResult:
trait SQLToResult[A, E <: WithExtractor, C[_]] extends SQL[A, E] with Extractor[A] {
  import GeneralizedTypeConstraintsForWithExtractor._

  def result[AA](f: WrappedResultSet => AA, session: DBSession): C[AA]
  val statement: String
  private[scalikejdbc] val rawParameters: Seq[Any]

  def apply()(
    implicit
    session: DBSession,
    context: ConnectionPoolContext = NoConnectionPoolContext,
    hasExtractor: ThisSQL =:= SQLWithExtractor
  ): C[A] = {
    val attributesSwitcher = createDBSessionAttributesSwitcher()
    val f: DBSession => C[A] = s => result[A](extractor, DBSessionWrapper(s, attributesSwitcher))
    // format: OFF
    session match {
      case AutoSession | ReadOnlyAutoSession => DB.readOnly(f)
      case NamedAutoSession(name, _)         => NamedDB(name, session.settings).readOnly(f)
      case ReadOnlyNamedAutoSession(name, _) => NamedDB(name, session.settings).readOnly(f)
      case _                                 => f(session)
    }
    // format: ON
  }
}
The apply method of this SQLToResult is the execution method for read-only SQL queries.
In SQL[A, E] the type parameter A is the return type of running the SQL, and E (a subtype of WithExtractor) is a phantom type recording whether an extractor function (WrappedResultSet => A) has been attached; the extractor converts result rows into A values. SQL also provides the bind and bindByName methods for binding SQL parameters. SQL is a generic type: besides the default query-style SQLToResult, it can be converted into execute, update, batch and other SQL types:
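For example, a query against the members table created above could bind a positional parameter and attach an extractor like this (a sketch only, assuming the 'h2mem pool has been set up):

import org.joda.time.LocalDate

// bind(...) supplies the positional parameter; map(...) attaches the extractor,
// turning the SQL[_, NoExtractor] into a SQL[String, HasExtractor]
val namesSQL: SQL[String, HasExtractor] =
  SQL("select name from members where birthday > ?")
    .bind(new LocalDate("1990-01-01"))
    .map(rs => rs.string("name"))

val names: List[String] = NamedDB('h2mem) readOnly { implicit session =>
  namesSQL.list.apply()
}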
  /**
   * Binds parameters for batch
   *
   * @param parameters parameters
   * @return SQL for batch
   */
  def batch(parameters: Seq[Any]*): SQLBatch = {
    new SQLBatch(statement, parameters, tags)
  }

  /**
   * Binds parameters for largeBatch
   *
   * @param parameters parameters
   * @return SQL for batch
   */
  def largeBatch(parameters: Seq[Any]*): SQLLargeBatch =
    new SQLLargeBatch(statement, parameters, tags)

  /**
   * Binds parameters for batch
   *
   * @param parameters parameters
   * @return SQL for batch
   */
  def batchAndReturnGeneratedKey(parameters: Seq[Any]*): SQLBatchWithGeneratedKey = {
    new SQLBatchWithGeneratedKey(statement, parameters, tags)(None)
  }

  /**
   * Binds parameters for batch
   *
   * @param generatedKeyName generated key name
   * @param parameters parameters
   * @return SQL for batch
   */
  def batchAndReturnGeneratedKey(generatedKeyName: String, parameters: Seq[Any]*): SQLBatchWithGeneratedKey = {
    new SQLBatchWithGeneratedKey(statement, parameters, tags)(Some(generatedKeyName))
  }

  /**
   * Binds parameters for batch
   *
   * @param parameters parameters
   * @return SQL for batch
   */
  def batchByName(parameters: Seq[(Symbol, Any)]*): SQLBatch = {
    val names = SQLTemplateParser.extractAllParameters(statement)
    val sqlWithPlaceHolders = SQLTemplateParser.convertToSQLWithPlaceHolders(statement)
    val _sql = validateAndConvertToNormalStatement(statement, sqlWithPlaceHolders, names, _settings, parameters.headOption.getOrElse(Seq.empty))._1
    val _parameters: Seq[Seq[Any]] = parameters.map { p =>
      validateAndConvertToNormalStatement(statement, sqlWithPlaceHolders, names, _settings, p)._2
    }
    new SQLBatch(_sql, _parameters, tags)
  }

  /**
   * Set execution type as execute
   *
   * @return SQL instance
   */
  def execute(): SQLExecution = {
    new SQLExecution(statement, rawParameters, tags)((stmt: PreparedStatement) => {})((stmt: PreparedStatement) => {})
  }

  /**
   * Set execution type as execute with filters
   *
   * @param before before filter
   * @param after after filter
   * @return SQL instance
   */
  def executeWithFilters(before: (PreparedStatement) => Unit, after: (PreparedStatement) => Unit): SQLExecution = {
    new SQLExecution(statement, rawParameters, tags)(before)(after)
  }

  /**
   * Set execution type as executeUpdate
   *
   * @return SQL instance
   */
  def executeUpdate(): SQLUpdate = update()

  /**
   * Set execution type as executeUpdate with filters
   *
   * @param before before filter
   * @param after after filter
   * @return SQL instance
   */
  def executeUpdateWithFilters(before: (PreparedStatement) => Unit, after: (PreparedStatement) => Unit): SQLUpdate = {
    updateWithFilters(before, after)
  }

  /**
   * Set execution type as executeUpdate
   *
   * @return SQL instance
   */
  def update(): SQLUpdate = {
    new SQLUpdate(statement, rawParameters, tags)((stmt: PreparedStatement) => {})((stmt: PreparedStatement) => {})
  }

  /**
   * Set execution type as executeUpdate with filters
   *
   * @param before before filter
   * @param after after filter
   * @return SQL instance
   */
  def updateWithFilters(before: (PreparedStatement) => Unit, after: (PreparedStatement) => Unit): SQLUpdate = {
    new SQLUpdate(statement, rawParameters, tags)(before)(after)
  }

  /**
   * Set execution type as `executeLargeUpdate`
   *
   * @return SQL instance
   */
  def largeUpdate(): SQLLargeUpdate =
    new SQLLargeUpdate(statement, rawParameters, tags)(_ => {})(_ => {})

  /**
   * Set execution type as `executeLargeUpdate` with filters
   *
   * @param before before filter
   * @param after after filter
   * @return SQL instance
   */
  def largeUpdateWithFilters(before: PreparedStatement => Unit, after: PreparedStatement => Unit): SQLLargeUpdate =
    new SQLLargeUpdate(statement, rawParameters, tags)(before)(after)

  /**
   * Set execution type as updateAndReturnGeneratedKey
   *
   * @return SQL instance
   */
  def updateAndReturnGeneratedKey(): SQLUpdateWithGeneratedKey = {
    updateAndReturnGeneratedKey(1)
  }

  def updateAndReturnGeneratedKey(name: String): SQLUpdateWithGeneratedKey = {
    new SQLUpdateWithGeneratedKey(statement, rawParameters, this.tags)(name)
  }

  def updateAndReturnGeneratedKey(index: Int): SQLUpdateWithGeneratedKey = {
    new SQLUpdateWithGeneratedKey(statement, rawParameters, this.tags)(index)
  }
Each of these SQL types embodies a particular way of executing SQL; that is, each has its own apply() method. For example:
/**
 * SQL which execute java.sql.Statement#execute().
 *
 * @param statement SQL template
 * @param parameters parameters
 * @param before before filter
 * @param after after filter
 */
class SQLExecution(val statement: String, val parameters: Seq[Any], val tags: Seq[String] = Nil)(
  val before: (PreparedStatement) => Unit
)(
  val after: (PreparedStatement) => Unit
) {

  def apply()(implicit session: DBSession): Boolean = {
    val attributesSwitcher = new DBSessionAttributesSwitcher(SQL("").tags(tags: _*))
    val f: DBSession => Boolean =
      DBSessionWrapper(_, attributesSwitcher).executeWithFilters(before, after, statement, parameters: _*)
    // format: OFF
    session match {
      case AutoSession                       => DB.autoCommit(f)
      case NamedAutoSession(name, _)         => NamedDB(name, session.settings).autoCommit(f)
      case ReadOnlyAutoSession               => DB.readOnly(f)
      case ReadOnlyNamedAutoSession(name, _) => NamedDB(name, session.settings).readOnly(f)
      case _                                 => f(session)
    }
    // format: ON
  }
}

/**
 * SQL which execute java.sql.Statement#executeUpdate().
 *
 * @param statement SQL template
 * @param parameters parameters
 * @param before before filter
 * @param after after filter
 */
class SQLUpdate(val statement: String, val parameters: Seq[Any], val tags: Seq[String] = Nil)(
  val before: (PreparedStatement) => Unit
)(
  val after: (PreparedStatement) => Unit
) {

  def apply()(implicit session: DBSession): Int = {
    val attributesSwitcher = new DBSessionAttributesSwitcher(SQL("").tags(tags: _*))
    session match {
      case AutoSession =>
        DB.autoCommit(DBSessionWrapper(_, attributesSwitcher).updateWithFilters(before, after, statement, parameters: _*))
      case NamedAutoSession(name, _) =>
        NamedDB(name, session.settings).autoCommit(DBSessionWrapper(_, attributesSwitcher).updateWithFilters(before, after, statement, parameters: _*))
      case ReadOnlyAutoSession =>
        DB.readOnly(DBSessionWrapper(_, attributesSwitcher).updateWithFilters(before, after, statement, parameters: _*))
      case ReadOnlyNamedAutoSession(name, _) =>
        NamedDB(name, session.settings).readOnly(DBSessionWrapper(_, attributesSwitcher).updateWithFilters(before, after, statement, parameters: _*))
      case _ =>
        DBSessionWrapper(session, attributesSwitcher).updateWithFilters(before, after, statement, parameters: _*)
    }
  }
}
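In application code you rarely touch these classes directly; you simply pick the execution type through the conversion method you call. A minimal sketch, assuming the members table and the 'h2mem pool from above (the index name is made up for illustration):

implicit val session: DBSession = NamedAutoSession('h2mem)

// execute(): runs via java.sql.Statement#execute(), suits DDL, returns a Boolean
SQL("create index if not exists members_name_idx on members(name)").execute().apply()

// update(): runs via executeUpdate(), returns the affected row count
val deleted: Int =
  SQL("delete from members where name = ?").bind("Nobody").update().apply()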
We can also put several SQL statements together inside one session block:
import org.joda.time._

// DB autoCommit == NamedDB('default) autoCommit
NamedDB('h2mem) autoCommit { implicit session =>
  val insertSql: SQL[Nothing, NoExtractor] =
    SQL("insert into members (name, birthday, created_at) values (?, ?, ?)")
  val createdAt: DateTime = DateTime.now
  insertSql.bind("Alice", Some(new LocalDate("1980-01-01")), createdAt).update.apply()  // commit
  insertSql.bind("Bob", None, createdAt).update.apply()                                 // commit
}
Here the same PreparedStatement template is reused: different sets of parameters are bound to it and each update is run in turn. Note that in autoCommit mode every update commits on its own, so this is not a single JDBC batch; genuine batching uses the batch method, shown in the sketch further below. The session block above is an example of the loan pattern: the block borrows a session (i.e. a connection) and hands it back when done. Besides autoCommit there are also readOnly, localTx and others:
/**
 * Provides auto-commit session block.
 * @param execution block
 * @tparam A return type
 * @return result value
 */
def autoCommit[A](execution: DBSession => A): A = {
  if (autoCloseEnabled) using(conn)(_ => execution(autoCommitSession()))
  else execution(autoCommitSession())
}

/**
 * Provides read-only session block.
 * @param execution block
 * @tparam A return type
 * @return result value
 */
def readOnly[A](execution: DBSession => A): A = {
  if (autoCloseEnabled) using(conn)(_ => execution(readOnlySession()))
  else execution(readOnlySession())
}

/**
 * Provides local-tx session block.
 * @param execution block
 * @tparam A return type
 * @return result value
 */
def localTx[A](execution: DBSession => A)(implicit boundary: TxBoundary[A] = defaultTxBoundary[A]): A = {
  val doClose = if (autoCloseEnabled) () => conn.close() else () => ()
  val tx = newTx
  begin(tx)
  val txResult = try {
    rollbackIfThrowable[A] {
      val session = DBSession(
        conn = conn,
        tx = Option(tx),
        connectionAttributes = connectionAttributes,
        settings = this.settingsProvider
      )
      val result: A = execution(session)
      boundary.finishTx(result, tx)
    }
  } catch {
    case e: Throwable => doClose(); throw e
  }
  boundary.closeConnection(txResult, doClose)
}
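As for the genuine JDBC batch mentioned above: the batch method sends one PreparedStatement with many parameter sets in a single round trip. A sketch only, reusing the members table and the 'h2mem pool:

import org.joda.time._

NamedDB('h2mem) localTx { implicit session =>
  val batchParams: Seq[Seq[Any]] = Seq(
    Seq("Carol", new LocalDate("1990-05-05"), DateTime.now),
    Seq("Dave",  new LocalDate("1992-07-07"), DateTime.now)
  )
  // one statement template, all parameter sets submitted as a single JDBC batch
  SQL("insert into members (name, birthday, created_at) values (?, ?, ?)")
    .batch(batchParams: _*)
    .apply()
}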
Inside an autoCommit block every SQL execution commits its own transaction, so rollback is not supported. In the localTx demo below, any exception raised during execution causes the whole transaction to roll back and the exception to be rethrown:
// data model
case class Member(
  id: Long,
  name: String,
  description: Option[String] = None,
  birthday: Option[LocalDate] = None,
  createdAt: DateTime)

def create(name: String, birthday: Option[LocalDate], remarks: Option[String])(implicit session: DBSession): Member = {
  val insertSQL: SQL[Nothing, NoExtractor] =
    sql"""insert into members (name, birthday, description, created_at)
          values (${name}, ${birthday}, ${remarks}, ${DateTime.now})"""
  val id: Long = insertSQL.updateAndReturnGeneratedKey.apply()
  Member(id, name, remarks, birthday, DateTime.now)
}

val users = List(
  ("John",  new LocalDate("2008-03-01"), "youngest user"),
  ("Susan", new LocalDate("2000-11-03"), "middle aged user"),
  ("Peter", new LocalDate("1983-01-21"), "oldest user")
)

import scala.util._                    // Try
import scalikejdbc.TxBoundary.Try._

val result: Try[List[Member]] = NamedDB('h2mem) localTx { implicit session =>
  Try {
    val members: List[Member] = users.map { person =>
      create(person._1, Some(person._2), Some(person._3))
    }
    throw new RuntimeException("fail test. boooommmm!!!!!")
    members
  }
}

result match {
  case Success(mlist) => println(s"batch added members: $mlist")
  case Failure(err)   => println(s"${err.getMessage}")
}
In the code above, everything inside the localTx block is wrapped in Try{}, which acts as the transaction boundary: any exception is confined within it. So in this example no exception reaches the caller; the returned Try carries the full outcome of the run (Success or Failure).
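Without importing scalikejdbc.TxBoundary.Try._, the default exception boundary applies: localTx rolls the transaction back and then rethrows, so the caller has to catch the exception itself. A sketch under that assumption, reusing the create helper and users list from above:

// default TxBoundary: the exception escapes localTx after the rollback
val attempt: Try[List[Member]] = Try {
  NamedDB('h2mem) localTx { implicit session =>
    val members = users.map(p => create(p._1, Some(p._2), Some(p._3)))
    throw new RuntimeException("fail test. boooommmm!!!!!")
    members
  }
}
// attempt is a Failure here, and the inserts above were rolled back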
We can also model a data row with a case class (or some other type) to get strongly typed access to rows:
// data row converter
val toMember = (rs: WrappedResultSet) => Member(
  id          = rs.long("id"),
  name        = rs.string("name"),
  description = rs.stringOpt("description"),
  birthday    = rs.jodaLocalDateOpt("birthday"),
  createdAt   = rs.jodaDateTime("created_at")
)

val selectSQL: SQL[Member, HasExtractor] = sql"""select * from members""".map(toMember)

val members: List[Member] = NamedDB('h2mem) readOnly { implicit session =>
  selectSQL.list.apply()
}
println(s"all members: $members")

NamedDB('h2mem).close()
The code above uses sql"""...""", standard Scala string interpolation, which is another way to construct a SQL[A, E].
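Worth noting: values interpolated through sql"..." are passed as bound parameters of the underlying PreparedStatement rather than spliced into the SQL text, so the two queries below should be equivalent (a sketch assuming the members table, the toMember converter and an open 'h2mem pool):

val who = "Alice"

// string interpolation: ${who} becomes a bound parameter
val byInterpolation: Option[Member] = NamedDB('h2mem) readOnly { implicit session =>
  sql"select * from members where name = ${who}".map(toMember).single.apply()
}

// the same query with an explicit placeholder and bind
val byBind: Option[Member] = NamedDB('h2mem) readOnly { implicit session =>
  SQL("select * from members where name = ?").bind(who).map(toMember).single.apply()
}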
The complete demo source code for this discussion follows:
import scalikejdbc._

object JDBCIntro extends App {
  // standard java procedure to register the driver
  Class.forName("org.h2.Driver")

  // the default pool would be named 'default:
  // ConnectionPool.singleton("jdbc:h2:mem:hello", "user", "pass")
  // == ConnectionPool.add('default, "jdbc:h2:mem:hello", "user", "pass")

  // this db name is 'h2mem
  ConnectionPool.add('h2mem, "jdbc:h2:mem:hello", "user", "pass")

  // construct SQL object
  val createSQL: SQL[Nothing, NoExtractor] = SQL("""
    create table members (
      id bigint primary key auto_increment,
      name varchar(30) not null,
      description varchar(1000),
      birthday date,
      created_at timestamp not null
    )""")

  // implicit val session = AutoSession   // the 'default pool
  // run this SQL
  createSQL.execute().apply()(NamedAutoSession('h2mem))   // autoCommit

  import org.joda.time._

  // DB autoCommit == NamedDB('default) autoCommit
  NamedDB('h2mem) autoCommit { implicit session =>
    val insertSql: SQL[Nothing, NoExtractor] =
      SQL("insert into members (name, birthday, created_at) values (?, ?, ?)")
    val createdAt: DateTime = DateTime.now
    insertSql.bind("Alice", Some(new LocalDate("1980-01-01")), createdAt).update.apply()  // commit
    insertSql.bind("Bob", None, createdAt).update.apply()                                 // commit
  }

  // data model
  case class Member(
    id: Long,
    name: String,
    description: Option[String] = None,
    birthday: Option[LocalDate] = None,
    createdAt: DateTime)

  def create(name: String, birthday: Option[LocalDate], remarks: Option[String])(implicit session: DBSession): Member = {
    val insertSQL: SQL[Nothing, NoExtractor] =
      sql"""insert into members (name, birthday, description, created_at)
            values (${name}, ${birthday}, ${remarks}, ${DateTime.now})"""
    val id: Long = insertSQL.updateAndReturnGeneratedKey.apply()
    Member(id, name, remarks, birthday, DateTime.now)
  }

  val users = List(
    ("John",  new LocalDate("2008-03-01"), "youngest user"),
    ("Susan", new LocalDate("2000-11-03"), "middle aged user"),
    ("Peter", new LocalDate("1983-01-21"), "oldest user")
  )

  import scala.util._                    // Try
  import scalikejdbc.TxBoundary.Try._

  val result: Try[List[Member]] = NamedDB('h2mem) localTx { implicit session =>
    Try {
      val members: List[Member] = users.map { person =>
        create(person._1, Some(person._2), Some(person._3))
      }
      throw new RuntimeException("fail test. boooommmm!!!!!")
      members
    }
  }

  result match {
    case Success(mlist) => println(s"batch added members: $mlist")
    case Failure(err)   => println(s"${err.getMessage}")
  }

  // data row converter
  val toMember = (rs: WrappedResultSet) => Member(
    id          = rs.long("id"),
    name        = rs.string("name"),
    description = rs.stringOpt("description"),
    birthday    = rs.jodaLocalDateOpt("birthday"),
    createdAt   = rs.jodaDateTime("created_at")
  )

  val selectSQL: SQL[Member, HasExtractor] = sql"""select * from members""".map(toMember)

  val members: List[Member] = NamedDB('h2mem) readOnly { implicit session =>
    selectSQL.list.apply()
  }
  println(s"all members: $members")

  NamedDB('h2mem).close()
}