# APISIX源码解析 - 启动篇 ## APISIX源码解析 - 启动篇 > apisix/cli/apisix.lua - 获取执行命令所需要的env变量 ``` local env = require("apisix.cli.env")(apisix_home, pkg_cpath_org, pkg_path_org) ``` > apisix/cli/env.lua ``` #apisix_home = "/usr/local/apisix" #pkg_cpath_org = "deps/lib64/lua/5.1/?.so;deps/lib/lua/5.1/?.so;" #pkg_path_org = "deps/share/lua/5.1/?.lua;" return function (apisix_home, pkg_cpath_org, pkg_path_org) -- 获取当前机器的ulimit -n配置 local res, err = util.execute_cmd("ulimit -n") if not res then error("failed to exec ulimit cmd \'ulimit -n \', err: " .. err) end local trimed_res = util.trim(res) local ulimit = trimed_res == "unlimited" and trimed_res or tonumber(trimed_res) if not ulimit then error("failed to fetch current maximum number of open file descriptors") end local is_root_path = false -- /usr/local/apisix/apisix/cli/apisix.lua local script_path = arg[0] -- 判断script_path是否为相对路径 if script_path:sub(1, 2) == './' then -- 获取当前路径为apisix_home apisix_home = util.trim(util.execute_cmd("pwd")) if not apisix_home then error("failed to fetch current path") end -- 判断apisix_home目录是否在/root目录下,如果在,就把is_root_path设置为true if str_find(apisix_home .. "/", '/root/', nil, true) == 1 then is_root_path = true end local pkg_cpath = apisix_home .. "/deps/lib64/lua/5.1/?.so;" .. apisix_home .. "/deps/lib/lua/5.1/?.so;" local pkg_path = apisix_home .. "/?/init.lua;" .. apisix_home .. "/deps/share/lua/5.1/?/init.lua;" .. apisix_home .. "/deps/share/lua/5.1/?.lua;;" -- 拼接cpath以及path位置 package.cpath = pkg_cpath .. package.cpath package.path = pkg_path .. package.path end do -- skip luajit environment local ok = pcall(require, "table.new") if not ok then local ok, json = pcall(require, "cjson") if ok and json then stderr:write("please remove the cjson library in Lua, it may " .. "conflict with the cjson library in openresty. " .. "\n luarocks remove lua-cjson\n") exit(1) end end end -- 获取openresty二进制可执行文件路径 res, err = util.execute_cmd("command -v openresty") if not res then error("failed to exec cmd \'command -v openresty\', err: " .. err) end local openresty_path_abs = util.trim(res) -- 拼接openresty启动命令 local openresty_args = openresty_path_abs .. [[ -p ]] .. apisix_home .. [[ -c ]] .. apisix_home .. [[/conf/nginx.conf]] -- 获取openresty info信息 local or_info, err = util.execute_cmd("openresty -V 2>&1") if not or_info then error("failed to exec cmd \'openresty -V 2>&1\', err: " .. err) end local use_apisix_base = true -- 查看是否安装了apisix-nginx-module模块 if not or_info:find("apisix-nginx-module", 1, true) then use_apisix_base = false end local min_etcd_version = "3.4.0" return { apisix_home = apisix_home, is_root_path = is_root_path, openresty_args = openresty_args, openresty_info = or_info, use_apisix_base = use_apisix_base, pkg_cpath_org = pkg_cpath_org, pkg_path_org = pkg_path_org, min_etcd_version = min_etcd_version, ulimit = ulimit, } end ``` - 入口函数 ```] local ops = require("apisix.cli.ops") ops.execute(env, arg) ``` > apisix/cli/ops.lua ``` -- 支持的子命令 local action = { help = help, version = version, init = init, init_etcd = etcd.init, start = start, stop = stop, quit = quit, restart = restart, reload = reload, test = test, } function _M.execute(env, arg) -- start 、 help等 local cmd_action = arg[1] if not cmd_action then return help() end if not action[cmd_action] then stderr:write("invalid argument: ", cmd_action, "\n") return help() end -- 所拥有的参数命令 action[cmd_action](env, arg[2]) end ``` ### 子命令 - 启动 start > apisix/cli/ops.lua ``` local function start(env, ...) -- 由于apisix启动的工作进程具有“nobody”权限,无法访问“/root”目录。因此,有必要禁止APISIX在/root目录下运行。 if env.is_root_path then util.die("Error: It is forbidden to run APISIX in the /root directory.\n") end -- 拼接日志目录,不存在就创建,存在但不是目录或者链接则报错 local logs_path = env.apisix_home .. "/logs" if not pl_path.exists(logs_path) then local _, err = pl_path.mkdir(logs_path) if err ~= nil then util.die("failed to mkdir ", logs_path, ", error: ", err) end elseif not pl_path.isdir(logs_path) and not pl_path.islink(logs_path) then util.die(logs_path, " is not directory nor symbol link") end -- 通过kill -0 `/logs/nginx.pid`检查nginx是否启动 local pid_path = env.apisix_home .. "/logs/nginx.pid" local pid = util.read_file(pid_path) pid = tonumber(pid) if pid then if pid <= 0 then print("invalid pid") return end local signone = 0 local ok, err, err_no = signal.kill(pid, signone) if ok then print("APISIX is running...") return -- no such process elseif err_no ~= errno.ESRCH then print(err) return end print("nginx.pid exists but there's no corresponding process with pid ", pid, ", the file will be overwritten") end -- 启动apisix local conf_server_sock_path = env.apisix_home .. "/conf/config_listen.sock" if pl_path.exists(conf_server_sock_path) then -- remove stale sock (if exists) so that APISIX can start local ok, err = os_remove(conf_server_sock_path) if not ok then util.die("failed to remove stale conf server sock file, error: ", err) end end -- 解析args参数 https://argparse.readthedocs.io/en/stable/options.html local parser = argparse() parser:argument("_", "Placeholder") parser:option("-c --config", "location of customized config.yaml") -- TODO: more logs for APISIX cli could be added using this feature parser:flag("--verbose", "show init_etcd debug information") local args = parser:parse() -- 将老的配置文件重命名为配置文件名.bak,将新的配置文件链接到老配置文件路径 local customized_yaml = args["config"] if customized_yaml then profile.apisix_home = env.apisix_home .. "/" local local_conf_path = profile:yaml_path("config") local local_conf_path_bak = local_conf_path .. ".bak" local ok, err = os_rename(local_conf_path, local_conf_path_bak) if not ok then util.die("failed to backup config, error: ", err) end local ok, err1 = lfs.link(customized_yaml, local_conf_path) if not ok then ok, err = os_rename(local_conf_path_bak, local_conf_path) if not ok then util.die("failed to recover original config file, error: ", err) end util.die("failed to link customized config, error: ", err1) end print("Use customized yaml: ", customized_yaml) end -- 执行init子命令函数,进行初始化操作,具体参考init子命令 init(env) -- 如果deployment_role不是data_plane数据平面,那么就初始化etcd(控制平面) if env.deployment_role ~= "data_plane" then -- 初始化etcd,并检测etcd是否能正常工作 init_etcd(env, args) end -- 执行openresty启动命令 util.execute_cmd(env.openresty_args) end ``` ### 子命令 - 帮助 help ``` local function help() print([[ Usage: apisix [action] help: show this message, then exit init: initialize the local nginx.conf init_etcd: initialize the data of etcd start: start the apisix server stop: stop the apisix server quit: stop the apisix server gracefully restart: restart the apisix server reload: reload the apisix server test: test the generated nginx.conf version: print the version of apisix ]]) end ``` ### 子命令 - 版本信息 version ``` local function version() print(ver['VERSION']) end ``` ### 子命令 - 初始化 init ``` local function init(env) -- 判断是否是在root目录下 if env.is_root_path then print('Warning! Running apisix under /root is only suitable for ' .. 'development environments and it is dangerous to do so. ' .. 'It is recommended to run APISIX in a directory ' .. 'other than /root.') end -- 判断获取到机器的打开文件句柄数量是否正确或者低于min_ulimit 1024 local min_ulimit = 1024 if env.ulimit ~= "unlimited" and env.ulimit <= min_ulimit then print(str_format("Warning! Current maximum number of open file " .. "descriptors [%d] is not greater than %d, please increase user limits by " .. "execute \'ulimit -n \' , otherwise the performance" .. " is low.", env.ulimit, min_ulimit)) end -- 读取默认配置以及用户定义配置,并进行合并 local yaml_conf, err = file.read_yaml_conf(env.apisix_home) if not yaml_conf then util.die("failed to read local yaml config of apisix: ", err, "\n") end local ok, err = schema.validate(yaml_conf) if not ok then util.die(err, "\n") end -- 针对admin api做相关检测,白名单、token等 local checked_admin_key = false local allow_admin = yaml_conf.deployment.admin and yaml_conf.deployment.admin.allow_admin if yaml_conf.apisix.enable_admin and allow_admin then for _, allow_ip in ipairs(allow_admin) do if allow_ip == "127.0.0.0/24" then checked_admin_key = true end end end if yaml_conf.apisix.enable_admin and not checked_admin_key then local help = [[ %s Please modify "admin_key" in conf/config.yaml . ]] local admin_key = yaml_conf.deployment.admin if admin_key then admin_key = admin_key.admin_key end if type(admin_key) ~= "table" or #admin_key == 0 then util.die(help:format("ERROR: missing valid Admin API token.")) end for _, admin in ipairs(admin_key) do if type(admin.key) == "table" then admin.key = "" else admin.key = tostring(admin.key) end if admin.key == "" then util.die(help:format("ERROR: missing valid Admin API token."), "\n") end if admin.key == "edd1c9f034335f136f87ad84b625c8f1" then stderr:write( help:format([[WARNING: using fixed Admin API token has security risk.]]), "\n" ) end end end -- admin证书相关校验配置 if yaml_conf.deployment.admin then local admin_api_mtls = yaml_conf.deployment.admin.admin_api_mtls local https_admin = yaml_conf.deployment.admin.https_admin if https_admin and not (admin_api_mtls and admin_api_mtls.admin_ssl_cert and admin_api_mtls.admin_ssl_cert ~= "" and admin_api_mtls.admin_ssl_cert_key and admin_api_mtls.admin_ssl_cert_key ~= "") then util.die("missing ssl cert for https admin") end end if yaml_conf.apisix.enable_admin and yaml_conf.deployment.config_provider == "yaml" then util.die("ERROR: Admin API can only be used with etcd config_provider.\n") end -- 获取openresty版本,如果没有既说明找不到本机的openresty可执行文件 local or_ver = get_openresty_version() if or_ver == nil then util.die("can not find openresty\n") end -- 根据获取的openresty版本信息进行比较是否为指定版本范围内 local need_ver = "1.19.3" if not version_greater_equal(or_ver, need_ver) then util.die("openresty version must >=", need_ver, " current ", or_ver, "\n") end -- 检查openresty是否安装了http_stub_status_module local or_info = env.openresty_info if not or_info:find("http_stub_status_module", 1, true) then util.die("'http_stub_status_module' module is missing in ", "your openresty, please check it out.\n") end -- 判断是否启用http,以及是否只启用了stream local enable_http = true if not yaml_conf.apisix.enable_admin and yaml_conf.apisix.stream_proxy and yaml_conf.apisix.stream_proxy.only ~= false then enable_http = false end -- 是否启动了自动发现 local enabled_discoveries = {} for name in pairs(yaml_conf.discovery or {}) do enabled_discoveries[name] = true end -- 启动的插件 local enabled_plugins = {} for i, name in ipairs(yaml_conf.plugins or {}) do enabled_plugins[name] = true end -- 启动的stream插件 local enabled_stream_plugins = {} for i, name in ipairs(yaml_conf.stream_plugins or {}) do enabled_stream_plugins[name] = true end -- 启动了插件proxy-cache,但是没有找到相关配置 if enabled_plugins["proxy-cache"] and not yaml_conf.apisix.proxy_cache then util.die("missing apisix.proxy_cache for plugin proxy-cache\n") end -- 是否启动了插件batch-requests,批请求 if enabled_plugins["batch-requests"] then local pass_real_client_ip = false local real_ip_from = yaml_conf.nginx_config.http.real_ip_from -- the real_ip_from is enabled by default, we just need to make sure it's -- not disabled by the users if real_ip_from then for _, ip in ipairs(real_ip_from) do local _ip = cli_ip:new(ip) if _ip then if _ip:is_loopback() or _ip:is_unspecified() then pass_real_client_ip = true end end end end if not pass_real_client_ip then util.die("missing loopback or unspecified in the nginx_config.http.real_ip_from" .. " for plugin batch-requests\n") end end local ports_to_check = {} local function validate_and_get_listen_addr(port_name, default_ip, configured_ip, default_port, configured_port) local ip = configured_ip or default_ip local port = tonumber(configured_port) or default_port if ports_to_check[port] ~= nil then util.die(port_name .. " ", port, " conflicts with ", ports_to_check[port], "\n") end ports_to_check[port] = port_name return ip .. ":" .. port end -- 监听在管理中使用单独端口,支持指定IP,兼容原有风格,并检测listen格式是否正确 local admin_server_addr if yaml_conf.apisix.enable_admin then local ip = yaml_conf.deployment.admin.admin_listen.ip local port = yaml_conf.deployment.admin.admin_listen.port admin_server_addr = validate_and_get_listen_addr("admin port", "0.0.0.0", ip, 9180, port) end -- 监听在control中使用单独端口,支持指定ip,并检测listen格式是否正确 local control_server_addr if yaml_conf.apisix.enable_control then if not yaml_conf.apisix.control then control_server_addr = validate_and_get_listen_addr("control port", "127.0.0.1", nil, 9090, nil) else control_server_addr = validate_and_get_listen_addr("control port", "127.0.0.1", yaml_conf.apisix.control.ip, 9090, yaml_conf.apisix.control.port) end end -- 监听在prometheus中使用listen地址,并检测listen格式是否正确 local prometheus_server_addr if yaml_conf.plugin_attr.prometheus then local prometheus = yaml_conf.plugin_attr.prometheus if prometheus.enable_export_server then prometheus_server_addr = validate_and_get_listen_addr("prometheus port", "127.0.0.1", prometheus.export_addr.ip, 9091, prometheus.export_addr.port) end end if enabled_stream_plugins["prometheus"] and not prometheus_server_addr then util.die("L4 prometheus metric should be exposed via export server\n") end local ip_port_to_check = {} local function listen_table_insert(listen_table, scheme, ip, port, enable_http2, enable_ipv6) if type(ip) ~= "string" then util.die(scheme, " listen ip format error, must be string", "\n") end if type(port) ~= "number" then util.die(scheme, " listen port format error, must be number", "\n") end if ports_to_check[port] ~= nil then util.die(scheme, " listen port ", port, " conflicts with ", ports_to_check[port], "\n") end local addr = ip .. ":" .. port if ip_port_to_check[addr] == nil then table_insert(listen_table, {ip = ip, port = port, enable_http2 = enable_http2}) ip_port_to_check[addr] = scheme end if enable_ipv6 then ip = "[::]" addr = ip .. ":" .. port if ip_port_to_check[addr] == nil then table_insert(listen_table, {ip = ip, port = port, enable_http2 = enable_http2}) ip_port_to_check[addr] = scheme end end end local node_listen = {} -- 将http监听地址按照指定格式插入到node_listen,支持多个地址,并且将node_listen重新赋值给yaml_conf.apisix.node_listen if type(yaml_conf.apisix.node_listen) == "number" then listen_table_insert(node_listen, "http", "0.0.0.0", yaml_conf.apisix.node_listen, false, yaml_conf.apisix.enable_ipv6) elseif type(yaml_conf.apisix.node_listen) == "table" then for _, value in ipairs(yaml_conf.apisix.node_listen) do if type(value) == "number" then listen_table_insert(node_listen, "http", "0.0.0.0", value, false, yaml_conf.apisix.enable_ipv6) elseif type(value) == "table" then local ip = value.ip local port = value.port local enable_ipv6 = false local enable_http2 = value.enable_http2 if ip == nil then ip = "0.0.0.0" if yaml_conf.apisix.enable_ipv6 then enable_ipv6 = true end end if port == nil then port = 9080 end if enable_http2 == nil then enable_http2 = false end listen_table_insert(node_listen, "http", ip, port, enable_http2, enable_ipv6) end end end yaml_conf.apisix.node_listen = node_listen local ssl_listen = {} -- 将https监听地址按照指定格式插入到ssl_listen,支持多个地址,并且将ssl_listen重新赋值给yaml_conf.apisix.ssl_listen for _, value in ipairs(yaml_conf.apisix.ssl.listen) do local ip = value.ip local port = value.port local enable_ipv6 = false local enable_http2 = value.enable_http2 if ip == nil then ip = "0.0.0.0" if yaml_conf.apisix.enable_ipv6 then enable_ipv6 = true end end if port == nil then port = 9443 end if enable_http2 == nil then enable_http2 = false end listen_table_insert(ssl_listen, "https", ip, port, enable_http2, enable_ipv6) end yaml_conf.apisix.ssl.listen = ssl_listen -- ssl证书相关设置 if yaml_conf.apisix.ssl.ssl_trusted_certificate ~= nil then local cert_path = yaml_conf.apisix.ssl.ssl_trusted_certificate -- During validation, the path is relative to PWD -- When Nginx starts, the path is relative to conf -- Therefore we need to check the absolute version instead cert_path = pl_path.abspath(cert_path) if not pl_path.exists(cert_path) then util.die("certificate path", cert_path, "doesn't exist\n") end yaml_conf.apisix.ssl.ssl_trusted_certificate = cert_path end -- enable ssl with place holder crt&key yaml_conf.apisix.ssl.ssl_cert = "cert/ssl_PLACE_HOLDER.crt" yaml_conf.apisix.ssl.ssl_cert_key = "cert/ssl_PLACE_HOLDER.key" local tcp_enable_ssl -- stream_proxy相关配置 if yaml_conf.apisix.stream_proxy and yaml_conf.apisix.stream_proxy.tcp then local tcp = yaml_conf.apisix.stream_proxy.tcp for i, item in ipairs(tcp) do if type(item) ~= "table" then tcp[i] = {addr = item} else if item.tls then tcp_enable_ssl = true end end end end -- dubbo-proxy相关配置 local dubbo_upstream_multiplex_count = 32 if yaml_conf.plugin_attr and yaml_conf.plugin_attr["dubbo-proxy"] then local dubbo_conf = yaml_conf.plugin_attr["dubbo-proxy"] if tonumber(dubbo_conf.upstream_multiplex_count) >= 1 then dubbo_upstream_multiplex_count = dubbo_conf.upstream_multiplex_count end end -- dns_resolver相关配置 if yaml_conf.apisix.dns_resolver_valid then if tonumber(yaml_conf.apisix.dns_resolver_valid) == nil then util.die("apisix->dns_resolver_valid should be a number") end end -- proxy_mirror相关配置 local proxy_mirror_timeouts if yaml_conf.plugin_attr["proxy-mirror"] then proxy_mirror_timeouts = yaml_conf.plugin_attr["proxy-mirror"].timeout end local conf_server, err = snippet.generate_conf_server(env, yaml_conf) if err then util.die(err, "\n") end -- 如果是控制平面并且没有adminserver地址配置,就根据现有地址拼接adminserver地址 if yaml_conf.deployment and yaml_conf.deployment.role then local role = yaml_conf.deployment.role env.deployment_role = role if role == "control_plane" and not admin_server_addr then local listen = node_listen[1] admin_server_addr = str_format("%s:%s", listen.ip, listen.port) end end -- 模板渲染所需要的变量 local sys_conf = { lua_path = env.pkg_path_org, lua_cpath = env.pkg_cpath_org, os_name = util.trim(util.execute_cmd("uname")), apisix_lua_home = env.apisix_home, deployment_role = env.deployment_role, use_apisix_base = env.use_apisix_base, error_log = {level = "warn"}, enable_http = enable_http, enabled_discoveries = enabled_discoveries, enabled_plugins = enabled_plugins, enabled_stream_plugins = enabled_stream_plugins, dubbo_upstream_multiplex_count = dubbo_upstream_multiplex_count, tcp_enable_ssl = tcp_enable_ssl, admin_server_addr = admin_server_addr, control_server_addr = control_server_addr, prometheus_server_addr = prometheus_server_addr, proxy_mirror_timeouts = proxy_mirror_timeouts, conf_server = conf_server, } -- 渲染前的检测 if not yaml_conf.apisix then util.die("failed to read `apisix` field from yaml file") end if not yaml_conf.nginx_config then util.die("failed to read `nginx_config` field from yaml file") end -- 使用命令getconf LONG_BIT获取系统位数,根据系统位数给nginx配置worker_rlimit_core进行赋值 if util.is_32bit_arch() then sys_conf["worker_rlimit_core"] = "4G" else sys_conf["worker_rlimit_core"] = "16G" end -- 将配置文件中apisix段下的配置赋值到sys_conf中 for k,v in pairs(yaml_conf.apisix) do sys_conf[k] = v end -- 将配置文件中nginx_config段下的配置赋值到sys_conf中,此段是nginx相关的配置更改 for k,v in pairs(yaml_conf.nginx_config) do sys_conf[k] = v end -- 将将配置文件中deployment.admin下相关配置赋值到sys_conf中 if yaml_conf.deployment.admin then for k,v in pairs(yaml_conf.deployment.admin) do sys_conf[k] = v end end sys_conf["wasm"] = yaml_conf.wasm -- 如果worker_rlimit_nofile配置不存在或者worker_rlimit_nofile值小于worker_connections,就将worker_rlimit_nofile=worker_connections+128 local wrn = sys_conf["worker_rlimit_nofile"] local wc = sys_conf["event"]["worker_connections"] if not wrn or wrn <= wc then -- ensure the number of fds is slightly larger than the number of conn sys_conf["worker_rlimit_nofile"] = wc + 128 end -- 如果是开发模式,就将进程设置为1,将防止惊群的配置设置为false,否则,如果worker_processes没有设置的话,就将worker_processes设置为auto if sys_conf["enable_dev_mode"] == true then sys_conf["worker_processes"] = 1 sys_conf["enable_reuseport"] = false elseif tonumber(sys_conf["worker_processes"]) == nil then sys_conf["worker_processes"] = "auto" end -- 如果dns_resolver没有设置,就从/etc/resolv.conf文件中将nameserver取出来赋值给sys_conf["dns_resolver"] local dns_resolver = sys_conf["dns_resolver"] if not dns_resolver or #dns_resolver == 0 then local dns_addrs, err = local_dns_resolver("/etc/resolv.conf") if not dns_addrs then util.die("failed to import local DNS: ", err, "\n") end if #dns_addrs == 0 then util.die("local DNS is empty\n") end sys_conf["dns_resolver"] = dns_addrs end -- 从dns_resolver中重新编码ipv6的格式,不支持的格式从列表中剔除 for i, r in ipairs(sys_conf["dns_resolver"]) do if r:match(":[^:]*:") then -- more than one colon, is IPv6 if r:byte(1) ~= str_byte('[') then -- ensure IPv6 address is always wrapped in [] sys_conf["dns_resolver"][i] = "[" .. r .. "]" end end -- check if the dns_resolver is ipv6 address with zone_id -- Nginx does not support this form if r:find("%%") then stderr:write("unsupported DNS resolver: " .. r .. ", would ignore this item\n") table_remove(sys_conf["dns_resolver"], i) end end -- 从系统环境变量中获取APISIX_WORKER_PROCESSES,如果存在就将此值赋值给worker_processes local env_worker_processes = getenv("APISIX_WORKER_PROCESSES") if env_worker_processes then sys_conf["worker_processes"] = floor(tonumber(env_worker_processes)) end -- 针对exported变量做相关处理赋值 local exported_vars = file.get_exported_vars() if exported_vars then if not sys_conf["envs"] then sys_conf["envs"]= {} end for _, cfg_env in ipairs(sys_conf["envs"]) do local cfg_name local from = str_find(cfg_env, "=", 1, true) if from then cfg_name = str_sub(cfg_env, 1, from - 1) else cfg_name = cfg_env end exported_vars[cfg_name] = false end for name, value in pairs(exported_vars) do if value then table_insert(sys_conf["envs"], name .. "=" .. value) end end end -- 如果启用了kubernetes的discoveries则注入kubernetes发现shard dict和环境变量 if enabled_discoveries["kubernetes"] then if not sys_conf["discovery_shared_dicts"] then sys_conf["discovery_shared_dicts"] = {} end local kubernetes_conf = yaml_conf.discovery["kubernetes"] local inject_environment = function(conf, envs) local keys = { conf.service.host, conf.service.port, } if conf.client.token then table_insert(keys, conf.client.token) end if conf.client.token_file then table_insert(keys, conf.client.token_file) end for _, key in ipairs(keys) do if #key > 3 then local first, second = str_byte(key, 1, 2) if first == str_byte('$') and second == str_byte('{') then local last = str_byte(key, #key) if last == str_byte('}') then envs[str_sub(key, 3, #key - 1)] = "" end end end end end local envs = {} if #kubernetes_conf == 0 then sys_conf["discovery_shared_dicts"]["kubernetes"] = kubernetes_conf.shared_size inject_environment(kubernetes_conf, envs) else for _, item in ipairs(kubernetes_conf) do sys_conf["discovery_shared_dicts"]["kubernetes-" .. item.id] = item.shared_size inject_environment(item, envs) end end if not sys_conf["envs"] then sys_conf["envs"] = {} end for item in pairs(envs) do table_insert(sys_conf["envs"], item) end end -- 规范lua_path、lua_cpath路径,如果结尾不是;的加上; sys_conf["extra_lua_path"] = get_lua_path(yaml_conf.apisix.extra_lua_path) sys_conf["extra_lua_cpath"] = get_lua_path(yaml_conf.apisix.extra_lua_cpath) -- 根据模版以及sys_conf生成nginx.conf配置文件 local conf_render = template.compile(ngx_tpl) local ngxconf = conf_render(sys_conf) local ok, err = util.write_file(env.apisix_home .. "/conf/nginx.conf", ngxconf) if not ok then util.die("failed to update nginx.conf: ", err, "\n") end end ``` ### 子命令 - etcd初始化 init_etcd > apisix/cli/etcd.lua ``` function _M.init(env, args) -- 针对配置文件做相关检测,如没问题则将配置文件中etcd段赋值给etcd_conf local yaml_conf, err = file.read_yaml_conf(env.apisix_home) if not yaml_conf then util.die("failed to read local yaml config of apisix: ", err) end if not yaml_conf.apisix then util.die("failed to read `apisix` field from yaml file when init etcd") end if yaml_conf.deployment.config_provider ~= "etcd" then return true end if not yaml_conf.etcd then util.die("failed to read `etcd` field from yaml file when init etcd") end local etcd_conf = yaml_conf.etcd -- 如果配置文件中的etcd地址类型为string,就做下处理转换为table类型 if type(yaml_conf.etcd.host) == "string" then yaml_conf.etcd.host = {yaml_conf.etcd.host} end local host_count = #(yaml_conf.etcd.host) -- 获取地址中的协议类型并赋值给scheme local scheme for i = 1, host_count do local host = yaml_conf.etcd.host[i] local fields = util.split(host, "://") if not fields then util.die("malformed etcd endpoint: ", host, "\n") end if not scheme then scheme = fields[1] elseif scheme ~= fields[1] then print([[WARNING: mixed protocols among etcd endpoints]]) end end -- 获取etcd版本,并检查版本是否低于指定版本,如果不低于,即添加到监控主机列表etcd_healthy_hosts local etcd_healthy_hosts = {} for index, host in ipairs(yaml_conf.etcd.host) do local version_url = host .. "/version" local errmsg local res, err local retry_time = 0 local etcd = yaml_conf.etcd local max_retry = tonumber(etcd.startup_retry) or 2 while retry_time < max_retry do res, err = request(version_url, yaml_conf) -- In case of failure, request returns nil followed by an error message. -- Else the first return value is the response body -- and followed by the response status code. if res then break end retry_time = retry_time + 1 print(str_format("Warning! Request etcd endpoint \'%s\' error, %s, retry time=%s", version_url, err, retry_time)) end if res then local body, _, err = dkjson.decode(res) if err or (body and not body["etcdcluster"]) then errmsg = str_format("got malformed version message: \"%s\" from etcd \"%s\"\n", res, version_url) util.die(errmsg) end local cluster_version = body["etcdcluster"] if compare_semantic_version(cluster_version, env.min_etcd_version) then util.die("etcd cluster version ", cluster_version, " is less than the required version ", env.min_etcd_version, ", please upgrade your etcd cluster\n") end table_insert(etcd_healthy_hosts, host) else io_stderr:write(str_format("request etcd endpoint \'%s\' error, %s\n", version_url, err)) end end -- 检查健康主机列表长度是否小于等于0 if #etcd_healthy_hosts <= 0 then util.die("all etcd nodes are unavailable\n") end -- 检查健康主机是否超过集群总主机的一半或以上 if (#etcd_healthy_hosts / host_count * 100) <= 50 then util.die("the etcd cluster needs at least 50% and above healthy nodes\n") end -- etcd开启了grpc但是apisix_base没有启用则报错,否则就看本地是否有安装etcdctl if etcd_conf.use_grpc and not env.use_apisix_base then io_stderr:write("'use_grpc: true' in the etcd configuration " .. "is not supported by vanilla OpenResty\n") end local use_grpc = etcd_conf.use_grpc and env.use_apisix_base if use_grpc then local ok, err = util.execute_cmd("command -v etcdctl") if not ok then util.die("can't find etcdctl: ", err, "\n") end end -- 判断etcd是否准备ok,http使用/v3/auth/authenticate是否响应进行判断 local etcd_ok = false for index, host in ipairs(etcd_healthy_hosts) do if prepare_dirs(use_grpc, yaml_conf, args, index, host, host_count) then etcd_ok = true break end end if not etcd_ok then util.die("none of the configured etcd works well\n") end end ``` ### 子命令 - 停止 stop ``` local function stop(env) -- 清理bak配置文件 cleanup() -- 执行openresty -p APISIX_HOME -c APISIX_HOME/conf/nginx.conf -s stop命令 local cmd = env.openresty_args .. [[ -s stop]] util.execute_cmd(cmd) end ``` ### 子命令 - 停止 quit ``` local function quit(env) -- 清理bak配置文件 cleanup() -- 执行openresty -p APISIX_HOME -c APISIX_HOME/conf/nginx.conf -s quit命令 local cmd = env.openresty_args .. [[ -s quit]] util.execute_cmd(cmd) end ``` ### 子命令 - 停止 restart ``` local function restart(env) -- 测试配置文件,stop后再start test(env) stop(env) start(env) end ``` ### 子命令 - 停止 reload ``` local function reload(env) -- 重新初始化nginx.conf配置文件 init(env) -- 检测配置文件语法是否正确,如果正确则执行openresty -p APISIX_HOME -c APISIX_HOME/conf/nginx.conf -s reload local test_cmd = env.openresty_args .. [[ -t -q ]] -- When success, -- On linux, os.execute returns 0, -- On macos, os.execute returns 3 values: true, exit, 0, and we need the first. local test_ret = execute((test_cmd)) if (test_ret == 0 or test_ret == true) then local cmd = env.openresty_args .. [[ -s reload]] execute(cmd) return end print("test openresty failed") end ``` ### 子命令 - 停止 test ``` local function test(env, backup_ngx_conf) -- 备份 nginx.conf文件 local ngx_conf_path = env.apisix_home .. "/conf/nginx.conf" local ngx_conf_path_bak = ngx_conf_path .. ".bak" local ngx_conf_exist = pl_path.exists(ngx_conf_path) if ngx_conf_exist then local ok, err = os_rename(ngx_conf_path, ngx_conf_path_bak) if not ok then util.die("failed to backup nginx.conf, error: ", err) end end -- 重新生成nginx.conf文件 init(env) -- 检测重新生成的nginx.conf文件语法是否正确 local test_cmd = env.openresty_args .. [[ -t -q ]] local test_ret = execute((test_cmd)) -- 还原老的nginx.conf配置文件 if ngx_conf_exist then local ok, err = os_rename(ngx_conf_path_bak, ngx_conf_path) if not ok then util.die("failed to restore original nginx.conf, error: ", err) end end -- When success, -- On linux, os.execute returns 0, -- On macos, os.execute returns 3 values: true, exit, 0, and we need the first. if (test_ret == 0 or test_ret == true) then print("configuration test is successful") return end util.die("configuration test failed") end ```