GitHub repository for the code: https://github.com/GuoZhaoran/spikeSystem
1 Architecture of Large-Scale High-Concurrency Systems
1.1 A Brief Introduction to Load Balancing
1.2 Demonstrating Nginx Weighted Round-Robin
# configure the upstream load-balancing pool
upstream load_rule {
    server 127.0.0.1:3001 weight=1;
    server 127.0.0.1:3002 weight=2;
    server 127.0.0.1:3003 weight=3;
    server 127.0.0.1:3004 weight=4;
}
...
server {
    listen       80;
    server_name  load_balance.com www.load_balance.com;
    location / {
        proxy_pass http://load_rule;
    }
}
package main

import (
	"net/http"
	"os"
	"strings"
)

func main() {
	http.HandleFunc("/buy/ticket", handleReq)
	http.ListenAndServe(":3001", nil)
}

// handleReq handles the request and records which port served it in the log.
func handleReq(w http.ResponseWriter, r *http.Request) {
	msg := "handle in port:"
	writeLog(msg, "./stat.log")
}

// writeLog appends a line to the log; the port number is spliced in as the
// join separator, so the line written is "handle in port:3001".
func writeLog(msg string, logPath string) {
	fd, _ := os.OpenFile(logPath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644)
	defer fd.Close()
	content := strings.Join([]string{msg, "\r\n"}, "3001")
	buf := []byte(content)
	fd.Write(buf)
}
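The file above hard-codes port 3001, so four copies would be needed to cover ports 3001-3004. As a small convenience sketch of my own (not part of the original repo), the port can instead be taken from a command-line flag so the same binary is simply started four times:

package main

import (
	"flag"
	"net/http"
	"os"
	"strings"
)

// port is read from the command line, e.g. `go run main.go -port=3002`.
var port = flag.String("port", "3001", "port the demo server listens on")

func main() {
	flag.Parse()
	http.HandleFunc("/buy/ticket", handleReq)
	http.ListenAndServe(":"+*port, nil)
}

// handleReq records which instance served the request.
func handleReq(w http.ResponseWriter, r *http.Request) {
	writeLog("handle in port:"+*port, "./stat.log")
}

// writeLog appends one line per request to the stat log.
func writeLog(msg string, logPath string) {
	fd, err := os.OpenFile(logPath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644)
	if err != nil {
		return
	}
	defer fd.Close()
	fd.Write([]byte(strings.Join([]string{msg, "\r\n"}, "")))
}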
ab -n 1000 -c 100 http://www.load_balance.com/buy/ticket
Counting the results in the log shows that ports 3001-3004 received 100, 200, 300 and 400 requests respectively, which matches the weight ratio configured in nginx very well, and the traffic after load balancing was spread evenly and randomly.
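If you want to reproduce that count, a throwaway helper along these lines (my own sketch, not from the repo, assuming all instances append to the same ./stat.log) tallies the per-port lines:

package main

import (
	"fmt"
	"os"
	"strings"
)

// Counts how many log lines mention each backend port.
func main() {
	data, err := os.ReadFile("./stat.log")
	if err != nil {
		fmt.Println("read log:", err)
		return
	}
	counts := map[string]int{}
	for _, line := range strings.Split(string(data), "\n") {
		for _, port := range []string{"3001", "3002", "3003", "3004"} {
			if strings.Contains(line, port) {
				counts[port]++
			}
		}
	}
	for _, port := range []string{"3001", "3002", "3003", "3004"} {
		fmt.Printf("port %s: %d requests\n", port, counts[port])
	}
}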
For the details of how this works you can read the source of nginx's upstream module; this article is a good reference:
https://www.kancloud.cn/digest/understandingnginx/202607
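As a rough intuition for what that source does, here is a short sketch of my own (not from the article) of the smooth weighted round-robin algorithm used by nginx's upstream module: on every pick each server's current weight grows by its configured weight, the server with the largest current weight wins, and the winner's current weight is then reduced by the total weight:

package main

import "fmt"

// server models one upstream backend in smooth weighted round-robin.
type server struct {
	name          string
	weight        int // configured weight
	currentWeight int // adjusted on every pick
}

// pick performs one round of smooth weighted round-robin.
func pick(servers []*server) *server {
	total := 0
	var best *server
	for _, s := range servers {
		s.currentWeight += s.weight
		total += s.weight
		if best == nil || s.currentWeight > best.currentWeight {
			best = s
		}
	}
	best.currentWeight -= total
	return best
}

func main() {
	servers := []*server{
		{name: "127.0.0.1:3001", weight: 1},
		{name: "127.0.0.1:3002", weight: 2},
		{name: "127.0.0.1:3003", weight: 3},
		{name: "127.0.0.1:3004", weight: 4},
	}
	counts := map[string]int{}
	for i := 0; i < 1000; i++ {
		counts[pick(servers).name]++
	}
	fmt.Println(counts) // 100/200/300/400, matching the 1:2:3:4 weights observed above
}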
2 Choosing an Approach for the Flash-Sale System
Back to the question we raised at the beginning: how can a train-ticket flash-sale system provide normal, stable service under high concurrency?
From the introduction above we know that flash-sale traffic is spread evenly across different servers by layer upon layer of load balancing. Even so, the QPS a single machine in the cluster has to bear is still very high. How do we push single-machine performance to the limit? To solve this problem, we first have to think one thing through:
A ticketing system normally goes through three basic stages: generating the order, deducting stock, and user payment. What our system must guarantee is that train-ticket orders are neither oversold nor undersold, that every ticket sold is only valid once it has been paid for, and that the system can withstand extremely high concurrency.
In what order should these three stages be arranged to be most reasonable? Let's analyze them:
2.1 Deduct Stock When the Order Is Placed

When concurrent user requests reach the server, we first create the order, then deduct stock, and wait for the user to pay. This is the sequence most of us would think of first, and it does guarantee that orders are not oversold, because stock is deducted right after the order is created as one atomic operation.
But it also brings some problems.
First, under extreme concurrency every detail of an operation matters for performance, and logic like creating an order usually has to be persisted to a disk-based database, so the pressure on the database is easy to imagine.
Second, if users place orders maliciously, ordering without paying, the stock shrinks and many tickets end up undersold. The server could limit the number of orders per IP or per user, but that is not a great solution either.
2.2 Deduct Stock When Payment Is Made

If we wait until the user has paid before deducting stock, the first impression is that we will not undersell. But this is a cardinal sin in concurrent architecture: under extreme concurrency users may create far more orders than there is stock, and when the stock hits zero many users discover that the order they grabbed can no longer be paid for. This is exactly the so-called "overselling" problem. Nor does this approach avoid concurrent database disk I/O.
2.3 Pre-deduct Stock

Here the stock is deducted (reserved) as soon as the purchase request arrives, which guarantees there is no overselling; the order is then generated asynchronously and the system waits for the user to pay. If the user does not pay within the order's validity period, the reserved stock is released again (see the summary in section 5). This is the scheme the rest of the article builds on.
3 The Art of Deducting Stock
From the analysis above it is clear that the pre-deduct scheme is the most reasonable. Let's dig further into the details of deducting stock, because there is still plenty of room for optimization: where should the stock live? How do we deduct it correctly under high concurrency while still responding to users quickly?
On a single machine with low concurrency, stock deduction is usually implemented like this:

To keep stock deduction and order creation atomic, we have to wrap them in a database transaction: read the stock, check it, deduct it, and finally commit the transaction. The whole flow involves a lot of I/O, and the database operations block. This approach is simply not suitable for a high-concurrency flash-sale system.
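To make that flow concrete, here is a minimal sketch using Go's database/sql; the `stock` and `orders` tables, their columns, and the MySQL-style placeholders are my own assumptions, not the article's schema:

package dbspike

import (
	"database/sql"
	"errors"
)

// deductStockTx illustrates the blocking, transactional approach described
// above: lock the row, check stock, deduct, create the order, commit.
func deductStockTx(db *sql.DB, ticketID, userID int64) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback() // no-op once Commit has succeeded

	// Lock the stock row and read the remaining count.
	var remaining int64
	if err := tx.QueryRow(
		"SELECT count FROM stock WHERE ticket_id = ? FOR UPDATE", ticketID,
	).Scan(&remaining); err != nil {
		return err
	}
	if remaining <= 0 {
		return errors.New("sold out")
	}

	// Deduct stock and create the order inside the same transaction.
	if _, err := tx.Exec(
		"UPDATE stock SET count = count - 1 WHERE ticket_id = ?", ticketID); err != nil {
		return err
	}
	if _, err := tx.Exec(
		"INSERT INTO orders (ticket_id, user_id) VALUES (?, ?)", ticketID, userID); err != nil {
		return err
	}
	return tx.Commit()
}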
Next we optimize the single-machine deduction scheme: local stock deduction. We allocate a certain amount of stock to each machine, deduct it directly in memory, and then create the order asynchronously following the earlier logic. The improved single-machine system looks like this:



4 Code Demonstration
Go was designed with concurrency in mind, so I will use Go to demonstrate the single-machine ticket-grabbing flow.
4.1 Initialization
...
// localSpike package struct definition
package localSpike

type LocalSpike struct {
	LocalInStock     int64 // stock allocated to this machine
	LocalSalesVolume int64 // number of tickets sold locally so far
}
...
// remoteSpike: hash-key definitions and the redis connection pool
package remoteSpike

// RemoteSpikeKeys holds the keys of the flash-sale order hash stored in redis
type RemoteSpikeKeys struct {
	SpikeOrderHashKey  string // key of the flash-sale order hash in redis
	TotalInventoryKey  string // hash field holding the total stock
	QuantityOfOrderKey string // hash field holding the number of orders placed
}

// NewPool initializes the redis connection pool
func NewPool() *redis.Pool {
	return &redis.Pool{
		MaxIdle:   10000,
		MaxActive: 12000, // max number of connections
		Dial: func() (redis.Conn, error) {
			c, err := redis.Dial("tcp", ":6379")
			if err != nil {
				panic(err.Error())
			}
			return c, err
		},
	}
}
...
func init() {
	localSpike = localSpike2.LocalSpike{
		LocalInStock:     150,
		LocalSalesVolume: 0,
	}
	remoteSpike = remoteSpike2.RemoteSpikeKeys{
		SpikeOrderHashKey:  "ticket_hash_key",
		TotalInventoryKey:  "ticket_total_nums",
		QuantityOfOrderKey: "ticket_sold_nums",
	}
	redisPool = remoteSpike2.NewPool()
	// done is a buffered channel of size 1 used as a binary lock around the local stock counter
	done = make(chan int, 1)
	done <- 1
}
4.2 Local Stock Deduction and Centralized Stock Deduction
package localSpike

// LocalDeductionStock deducts one unit from the local stock and reports
// whether the sale is still within the locally allocated quota.
func (spike *LocalSpike) LocalDeductionStock() bool {
	spike.LocalSalesVolume = spike.LocalSalesVolume + 1
	return spike.LocalSalesVolume <= spike.LocalInStock
}
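LocalDeductionStock is not safe to call from multiple goroutines by itself; the handler in section 4.3 serializes calls with the `done` channel. As a hypothetical alternative of my own (not in the original repo), the same reservation on the LocalSpike struct above could be done lock-free with sync/atomic:

package localSpike

import "sync/atomic"

// LocalDeductionStockAtomic is a hypothetical lock-free variant: the atomic
// increment reserves one unit, so callers would not need the channel-based
// lock used in handleReq.
func (spike *LocalSpike) LocalDeductionStockAtomic() bool {
	return atomic.AddInt64(&spike.LocalSalesVolume, 1) <= spike.LocalInStock
}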
package remoteSpike
......

const LuaScript = `
	local ticket_key = KEYS[1]
	local ticket_total_key = ARGV[1]
	local ticket_sold_key = ARGV[2]
	local ticket_total_nums = tonumber(redis.call('HGET', ticket_key, ticket_total_key))
	local ticket_sold_nums = tonumber(redis.call('HGET', ticket_key, ticket_sold_key))
	-- if tickets remain, increment the sold counter and return the new value
	if(ticket_total_nums > ticket_sold_nums) then
		return redis.call('HINCRBY', ticket_key, ticket_sold_key, 1)
	end
	return 0
`

// RemoteDeductionStock deducts stock from the centralized (redis) inventory;
// the Lua script executes atomically on the redis server.
func (keys *RemoteSpikeKeys) RemoteDeductionStock(conn redis.Conn) bool {
	lua := redis.NewScript(1, LuaScript)
	result, err := redis.Int(lua.Do(conn, keys.SpikeOrderHashKey, keys.TotalInventoryKey, keys.QuantityOfOrderKey))
	if err != nil {
		return false
	}
	return result != 0
}
Before running the test, initialize the central stock in the redis hash (via redis-cli):
hmset ticket_hash_key "ticket_total_nums" 10000 "ticket_sold_nums" 0
4.3 Responding to the User
package main
...
func main() {
	http.HandleFunc("/buy/ticket", handleReq)
	http.ListenAndServe(":3005", nil)
}
package main

// handleReq handles the purchase request, deducts local and central stock, and
// writes the result to the log.
func handleReq(w http.ResponseWriter, r *http.Request) {
	redisConn := redisPool.Get()
	defer redisConn.Close() // return the connection to the pool when done
	LogMsg := ""
	// the buffered `done` channel acts as a global lock serializing access to the local stock
	<-done
	if localSpike.LocalDeductionStock() && remoteSpike.RemoteDeductionStock(redisConn) {
		util.RespJson(w, 1, "ticket grabbed successfully", nil)
		LogMsg = LogMsg + "result:1,localSales:" + strconv.FormatInt(localSpike.LocalSalesVolume, 10)
	} else {
		util.RespJson(w, -1, "sold out", nil)
		LogMsg = LogMsg + "result:0,localSales:" + strconv.FormatInt(localSpike.LocalSalesVolume, 10)
	}
	done <- 1
	// record the grab result in the log
	writeLog(LogMsg, "./stat.log")
}

// writeLog appends one result line to the stat log.
func writeLog(msg string, logPath string) {
	fd, _ := os.OpenFile(logPath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644)
	defer fd.Close()
	content := strings.Join([]string{msg, "\r\n"}, "")
	buf := []byte(content)
	fd.Write(buf)
}
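Notice that the handler above deducts stock and responds to the user, but the actual order creation is left out; the architecture described in section 3 creates orders asynchronously. A minimal sketch of that hand-off, under my own assumptions (hypothetical Order type and queue, not from the repo), pushes a message onto a buffered channel that a worker goroutine drains outside the request path:

package main

import (
	"fmt"
	"time"
)

// Order is a hypothetical order message handed off by the request handler.
type Order struct {
	UserID   int64
	TicketID int64
}

// orderQueue decouples the fast in-memory stock deduction from the slow
// order persistence.
var orderQueue = make(chan Order, 1024)

// orderWorker drains the queue and persists orders (replaced by a print here).
func orderWorker() {
	for order := range orderQueue {
		// In a real system this would insert into a database or publish to a message queue.
		fmt.Printf("persisting order: user=%d ticket=%d\n", order.UserID, order.TicketID)
	}
}

func main() {
	go orderWorker()
	// After a successful stock deduction, the handler only enqueues the order.
	orderQueue <- Order{UserID: 42, TicketID: 1}
	time.Sleep(100 * time.Millisecond) // give the worker time to run in this demo
}

In a real system the worker would also need to handle back-pressure when the queue fills up and retry failed writes.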
4.4 Single-Machine Load Test
ab -n 10000 -c 100 http://127.0.0.1:3005/buy/ticket
This is ApacheBench, Version 2.3 <$Revision: 1826891 $>
Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
Licensed to The Apache Software Foundation, http://www.apache.org/
Benchmarking 127.0.0.1 (be patient)
Completed 1000 requests
Completed 2000 requests
Completed 3000 requests
Completed 4000 requests
Completed 5000 requests
Completed 6000 requests
Completed 7000 requests
Completed 8000 requests
Completed 9000 requests
Completed 10000 requests
Finished 10000 requests
Server Software:
Server Hostname: 127.0.0.1
Server Port: 3005
Document Path: /buy/ticket
Document Length: 29 bytes
Concurrency Level: 100
Time taken for tests: 2.339 seconds
Complete requests: 10000
Failed requests: 0
Total transferred: 1370000 bytes
HTML transferred: 290000 bytes
Requests per second: 4275.96 [#/sec] (mean)
Time per request: 23.387 [ms] (mean)
Time per request: 0.234 [ms] (mean, across all concurrent requests)
Transfer rate: 572.08 [Kbytes/sec] received
Connection Times (ms)
min mean[+/-sd] median max
Connect: 0 8 14.7 6 223
Processing: 2 15 17.6 11 232
Waiting: 1 11 13.5 8 225
Total: 7 23 22.8 18 239
Percentage of the requests served within a certain time (ms)
50% 18
66% 24
75% 26
80% 28
90% 33
95% 39
98% 45
99% 54
100% 239 (longest request)
//stat.log
...
result:1,localSales:145
result:1,localSales:146
result:1,localSales:147
result:1,localSales:148
result:1,localSales:149
result:1,localSales:150
result:0,localSales:151
result:0,localSales:152
result:0,localSales:153
result:0,localSales:154
result:0,localSales:156
...
5 Summary and Review
Overall, a flash-sale system is very complex. Here we have only briefly introduced and simulated how a single machine can be optimized for high performance, how a cluster can avoid single points of failure, and some strategies for guaranteeing that orders are neither oversold nor undersold. A complete order system also needs order-progress lookup; each server runs a scheduled task that periodically syncs the remaining tickets and stock information from the central inventory to show to users; and when a user fails to pay within the order's validity period, the order is released and the stock replenished, and so on.
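As one illustration of the periodic sync mentioned above, here is a minimal sketch of my own (the redigo import path and function names are assumptions, not part of the repo) that refreshes a locally cached remaining-ticket count from the redis hash on a ticker:

package main

import (
	"log"
	"sync/atomic"
	"time"

	"github.com/gomodule/redigo/redis"
)

// syncRemainingTickets periodically reads the central counters from redis and
// atomically updates a locally cached "tickets remaining" value for display.
func syncRemainingTickets(pool *redis.Pool, interval time.Duration, remaining *int64) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for range ticker.C {
		conn := pool.Get()
		total, err1 := redis.Int64(conn.Do("HGET", "ticket_hash_key", "ticket_total_nums"))
		sold, err2 := redis.Int64(conn.Do("HGET", "ticket_hash_key", "ticket_sold_nums"))
		conn.Close()
		if err1 != nil || err2 != nil {
			log.Println("sync remaining tickets failed")
			continue
		}
		atomic.StoreInt64(remaining, total-sold)
	}
}

func main() {
	pool := &redis.Pool{Dial: func() (redis.Conn, error) { return redis.Dial("tcp", ":6379") }}
	var remaining int64
	go syncRemainingTickets(pool, 2*time.Second, &remaining)
	time.Sleep(5 * time.Second)
	log.Println("tickets remaining:", atomic.LoadInt64(&remaining))
}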
Author: 繪你一世傾城
Source: juejin.im/post/5d84e21f6fb9a06ac8248149

