您好,登錄后才能下訂單哦!
本篇內容主要講解“nginx回源時bind ip的優化方法是什么”,感興趣的朋友不妨來看看。本文介紹的方法操作簡單快捷,實用性強。下面就讓小編來帶大家學習“nginx回源時bind ip的優化方法是什么”吧!
proxy_bind隸屬于proxy_module,為向后端建立連接時的local ip,在nginx源碼中只支持bind一個ip進行回源,若想使用多個ip進行回源時,可以修改源碼支持bind ip數組。在實際應用中我就是這樣做的。bind ip數據輪詢選擇ip進行回源與upstream建立連接,以解決單ip回源連接數限制問題。下面proxy_bind部分就是針對proxy_bind進行優化后的代碼,支持bind多ip。
check_bind則是對源站進行健康檢查所使用的源ip,在對upstream進行健康檢查時,所使用的源ip與upstream建連根據響應進行判斷健康狀況,將健康檢查所使用的ip組與業務回源使用的ip組進行區分開來。check_bind配置并不是nginx自帶的功能需要對nginx進行二次開發。
nginx源碼配置:
/* Original nginx directive table entry: "proxy_bind" accepts at most two
 * arguments (NGX_CONF_TAKE12) and stores a single local address in
 * upstream.local via the stock setter. */
{ ngx_string("proxy_bind"),
NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE12,
ngx_http_upstream_bind_set_slot,
NGX_HTTP_LOC_CONF_OFFSET,
offsetof(ngx_http_proxy_loc_conf_t, upstream.local),
NULL },
改進后配置:
ngx_string("proxy_bind"),
NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_1MORE,
ngx_http_upstream_bind_set_slot_array,
NGX_HTTP_LOC_CONF_OFFSET,
offsetof(ngx_http_proxy_loc_conf_t, upstream.local_array),
NULL },
下面是相關代碼優化部分:
/*
 * Parse the "proxy_bind" directive arguments into an array of local
 * addresses.  Each address argument is parsed and pushed into
 * local->addr->addrs; the upstream connect code later iterates over this
 * array so origin-fetch connections are spread across several source IPs
 * (works around the per-IP connection limit of a single bind address).
 *
 * Returns an error string for a duplicate directive, NGX_CONF_ERROR on
 * parse or allocation failure, otherwise continues (success path elided).
 */
char *ngx_http_upstream_bind_set_slot_array(ngx_conf_t *cf, ngx_command_t *cmd,
    void *conf)
{
    ...
    ngx_http_upstream_local_array_t **plocal, *local;

    plocal = (ngx_http_upstream_local_array_t **) (p + cmd->offset);

    /* reject a second proxy_bind in the same configuration scope */
    if (*plocal != NGX_CONF_UNSET_PTR) {
        return "bind is duplicate";
    }

    value = cf->args->elts;

    /* allocate the local-address array holder
     * (fix: check allocation results before dereferencing) */
    local = ngx_pcalloc(cf->pool, sizeof(ngx_http_upstream_local_array_t));
    if (local == NULL) {
        return NGX_CONF_ERROR;
    }
    *plocal = local;

    /* allocate the peer address container */
    local->addr = ngx_pcalloc(cf->pool, sizeof(ngx_peer_addrs_t));
    if (local->addr == NULL) {
        return NGX_CONF_ERROR;
    }

    /* create the ngx_addr_t array */
    local->addr->addrs = ngx_array_create(cf->pool, 1, sizeof(ngx_addr_t));
    if (local->addr->addrs == NULL) {
        return NGX_CONF_ERROR;
    }

    /* walk every configured local ip and push it into the array */
    for (i = 1; i < cf->args->nelts; i++) {
        addr = ngx_array_push(local->addr->addrs);
        if (addr == NULL) {
            return NGX_CONF_ERROR;
        }

        rc = ngx_parse_addr(cf->pool, addr, value[i].data, value[i].len);

        switch (rc) {
        case NGX_OK:
            addr->name = value[i];
            break;

        case NGX_DECLINED:
            ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                               "invalid address \"%V\"", &value[i]);
            /* fall through */

        default:
            return NGX_CONF_ERROR;
        }
    }
    ...
}
之后在 init_request 中將 u->peer.local_array 賦值為 u->conf->local_array(即剛剛通過指令賦值的)
/* Upstream request initialization: propagate the bind-ip array configured
 * by proxy_bind from the location conf into the peer connection state. */
static void ngx_http_upstream_init_request(ngx_http_request_t *r)
{
...
u = r->upstream;
/* copy u->conf->local_array (set by the proxy_bind directive) into the
 * peer so the connect code can pick a source address from the array */
u->peer.local_array = ngx_http_upstream_get_local_array(r, u->conf->local_array);
...
}
這個 u->conf 是如何獲取的呢?其實是通過不同的 handler 獲得的,比如我們 tengine 配置中常用的 proxy_pass,就可以將這個賦值上去
/* proxy module content handler: wires the proxy location conf's upstream
 * conf (which carries local_array) into the request's upstream. */
static ngx_int_t ngx_http_proxy_handler(ngx_http_request_t *r)
{
...
u = r->upstream;
// fetch the proxy module's location conf
plcf = ngx_http_get_module_loc_conf(r, ngx_http_proxy_module);
// attach the corresponding upstream conf to the request
u->conf = &plcf->upstream;
...
}
這個 plcf->upstream->upstream 是真正的 upstream_srv_conf,是在 proxy_pass 中獲取的
/* proxy_pass directive handler: resolves (or creates) the upstream srv
 * conf that plcf->upstream.upstream points at. */
static char *ngx_http_proxy_pass(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
...
/* register/look up the upstream block for this proxy_pass target */
plcf->upstream.upstream = ngx_http_upstream_add(cf, &u, 0);
...
}
增加 check_bind 模塊
check_bind 在健康檢查的main_conf中添加global_local。
ngx_string("check_bind"),
NGX_HTTP_MAIN_CONF|NGX_CONF_1MORE,
ngx_http_upstream_bind_set_slot_array,
NGX_HTTP_MAIN_CONF_OFFSET,
offsetof(ngx_http_upstream_check_main_conf_t, global_local),
NULL,
/* Main conf of the upstream health-check module, extended with the
 * check_bind source-address array. */
typedef struct {
ngx_uint_t check_shm_size;                        /* shared memory size for check state */
ngx_http_upstream_check_peers_t *peers;           /* all peers under health check */
ngx_http_upstream_local_array_t *global_local;    /* check_bind source addresses */
} ngx_http_upstream_check_main_conf_t;
接下來的操作和proxy_bind類似
/* The same array setter is reused for "check_bind"; here the conf pointer
 * targets the upstream check module's main conf. */
char * ngx_http_upstream_bind_set_slot_array(ngx_conf_t *cf, ngx_command_t *cmd,
void *conf)
{
...
// here "global" refers to the upstream_check_main_conf
...
}
將global_local存在ucmcf中之后,接下來要將這個global放到每個upstream中,也就是uscf
調用的是upstream_check的init_main方法
/* After config parsing, push the main-conf global_local (check_bind
 * addresses) down into every upstream's srv conf. */
static char * ngx_http_upstream_check_init_main_conf(ngx_conf_t *cf, void *conf)
{
...
// fetch the upstream module's main conf
umcf = ngx_http_conf_get_module_main_conf(cf, ngx_http_upstream_module);
// pointer array of all configured upstream blocks
uscfp = umcf->upstreams.elts;
for (i = 0; i < umcf->upstreams.nelts; i++) {
// propagate global_local into each upstream srv conf
if (ngx_http_upstream_check_init_srv_conf(cf, uscfp[i], ucmcf->global_local) != NGX_OK) {
return NGX_CONF_ERROR;
}
}
...
}
/* Per-upstream init: inherit the global check_bind address array into the
 * upstream's check srv conf, but only when the srv conf did not set one. */
static char * ngx_http_upstream_check_init_srv_conf(ngx_conf_t *cf, void *conf,
ngx_http_upstream_local_array_t *global_local)
{
...
ngx_http_upstream_srv_conf_t *us = conf;
// fetch the check module's conf attached to this upstream srv conf
ucscf = ngx_http_conf_upstream_srv_conf(us, ngx_http_upstream_check_module);
// inherit only when unset; a valid global value takes effect
if (ucscf->global_local == NGX_CONF_UNSET_PTR) {
if (global_local != NGX_CONF_UNSET_PTR && global_local != NULL) {
ucscf->global_local = global_local;
}
}
...
}
如果dyups兼容的話,需要在add peer里面保證被賦值,同時添加peer
/*
 * Register one upstream peer with the health-check module.  For dyups
 * compatibility the srv conf inherits the main-conf check_bind addresses
 * here (dynamically added upstreams bypass init_srv_conf), then the peer
 * is appended to the shared peers array.
 */
ngx_uint_t ngx_http_upstream_check_add_peer(ngx_conf_t *cf,
    ngx_http_upstream_srv_conf_t *us, ngx_addr_t *peer_addr
#ifdef CONFIG_NGX_NS
    , ngx_int_t vni, ngx_addr_t *hp_addr, ngx_addr_t *hp2_addr, __u8 *mac
#endif
    )
{
    ...
    /* add check_bind support for dyups modules: fall back to the
     * main-conf global_local when this srv conf has no binding */
    if (ucscf->global_local == NGX_CONF_UNSET_PTR) {
        if (ucmcf->global_local != NGX_CONF_UNSET_PTR && ucmcf->global_local != NULL) {
            ucscf->global_local = ucmcf->global_local;
        }
    }

    /* append the peer
     * (fix: ngx_array_push may return NULL on allocation failure;
     * check before dereferencing) */
    peers = ucmcf->peers;
    peer = ngx_array_push(&peers->peers);
    if (peer == NULL) {
        return NGX_ERROR;
    }
    peer->index = peers->peers.nelts - 1;

    /* key step: the ucscf that just inherited global_local becomes
     * peer->conf, so the check connect handler can reach the bind ips */
    peer->conf = ucscf;
    peer->upstream_name = &us->host;
    peer->peer_addr = peer_addr;
    ...
}
在add_timer的時將這些handler全部賦值,data指定為上面生成的peer
/* Install the periodic check event for a peer; the event's data is the
 * peer itself so the handlers can recover it from event->data. */
static ngx_int_t ngx_http_upstream_check_add_timer(ngx_http_upstream_check_peer_t *peer,
ngx_check_conf_t *check_conf, ngx_msec_t timer, ngx_log_t *log)
{
...
peer->check_ev.handler = ngx_http_upstream_check_begin_handler;
peer->check_ev.log = log;
peer->check_ev.data = peer;
...
}
ngx_http_upstream_check_connect_handler 這是upstream check的時候調用的handler
/* Timer entry point: only the worker that owns this peer's shared-memory
 * slot actually performs the check connection. */
static void ngx_http_upstream_check_begin_handler(ngx_event_t *event)
{
...
if (peer->shm->owner == ngx_pid) {
ngx_http_upstream_check_connect_handler(event);
}
...
}
/* Open the health-check connection; selects the check_bind address array
 * (if configured) as the connection's local source addresses. */
static void ngx_http_upstream_check_connect_handler(ngx_event_t *event)
{
...
peer = event->data;
// peer->conf is the check module's srv conf (ucscf)
ucscf = peer->conf;
// use the check_bind addresses as the local address set, if present
if (peer->conf->global_local != NGX_CONF_UNSET_PTR && peer->conf->global_local != NULL) {
peer->pc.local_array = peer->conf->global_local->addr;
} else {
peer->pc.local_array = NULL;
}
rc = ngx_event_connect_peer(&peer->pc);
...
}
到此,相信大家對“nginx回源時bind ip的優化方法是什么”有了更深的了解,不妨來實際操作一番吧!這里是億速云網站,更多相關內容可以進入相關頻道進行查詢,關注我們,繼續學習!
免責聲明:本站發布的內容(圖片、視頻和文字)以原創、轉載和分享為主,文章觀點不代表本網站立場,如果涉及侵權請聯系站長郵箱:is@yisu.com進行舉報,并提供相關證據,一經查實,將立刻刪除涉嫌侵權內容。