docker.go

package docker

import (
    "context"
    "encoding/json"
    "fmt"
    "io"
    "log/slog"
    "os"
    "strings"
    "time"

    "goseg/config"
    "goseg/structs"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/client"
)

var (
    logger   = slog.New(slog.NewJSONHandler(os.Stdout, nil))
    EventBus = make(chan structs.Event, 100)
)

// return the container status of a slice of ships
func GetShipStatus(patps []string) (map[string]string, error) {
    statuses := make(map[string]string)
    cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    if err != nil {
        logger.Error(fmt.Sprintf("Error creating Docker client: %v", err))
        return statuses, err
    }
    defer cli.Close()
    containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
    if err != nil {
        logger.Error(fmt.Sprintf("Error getting containers: %v", err))
        return statuses, err
    }
    for _, pier := range patps {
        found := false
        // docker prefixes container names with a slash
        fasPier := "/" + pier
        for _, cont := range containers {
            for _, name := range cont.Names {
                if name == fasPier {
                    statuses[pier] = cont.Status
                    found = true
                    break
                }
            }
            if found {
                break
            }
        }
        if !found {
            statuses[pier] = "not found"
        }
    }
    return statuses, nil
}
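
// Usage is illustrative only; ship names are passed without the leading
// slash that Docker adds to container names, e.g.
//
//  statuses, err := GetShipStatus([]string{"zod", "sampel-palnet"})
//  // statuses["zod"] -> "Up 2 hours", "Exited (0) ...", or "not found"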

// return the name of a container's network
func GetContainerNetwork(name string) (string, error) {
    cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    if err != nil {
        return "", err
    }
    defer cli.Close()
    containerJSON, err := cli.ContainerInspect(context.Background(), name)
    if err != nil {
        return "", err
    }
    for networkName := range containerJSON.NetworkSettings.Networks {
        return networkName, nil
    }
    return "", fmt.Errorf("container is not attached to any network")
}

// return the disk and memory usage for a container
func GetContainerStats(containerName string) (structs.ContainerStats, error) {
    var res structs.ContainerStats
    cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    if err != nil {
        return res, err
    }
    defer cli.Close()
    statsResp, err := cli.ContainerStats(context.Background(), containerName, false)
    if err != nil {
        return res, err
    }
    defer statsResp.Body.Close()
    var stat types.StatsJSON
    if err := json.NewDecoder(statsResp.Body).Decode(&stat); err != nil {
        return res, err
    }
    memUsage := stat.MemoryStats.Usage
    // inspect with size so SizeRw (writable layer usage) is actually populated
    inspectResp, _, err := cli.ContainerInspectWithRaw(context.Background(), containerName, true)
    if err != nil {
        return res, err
    }
    diskUsage := int64(0)
    if inspectResp.SizeRw != nil {
        diskUsage = *inspectResp.SizeRw
    }
    return structs.ContainerStats{
        MemoryUsage: memUsage,
        DiskUsage:   diskUsage,
    }, nil
}

// start a container by name + type
// not for booting new ships
func StartContainer(containerName string, containerType string) (structs.ContainerState, error) {
    var containerState structs.ContainerState
    ctx := context.Background()
    cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    if err != nil {
        return containerState, err
    }
    defer cli.Close()
    // get the desired tag and hash from config
    containerInfo, err := GetLatestContainerInfo(containerType)
    if err != nil {
        return containerState, err
    }
    // check if the container already exists (including stopped ones)
    containers, err := cli.ContainerList(ctx, types.ContainerListOptions{All: true})
    if err != nil {
        return containerState, err
    }
    var existingContainer *types.Container
    for i, cont := range containers {
        for _, name := range cont.Names {
            if name == "/"+containerName {
                // point at the slice element rather than the loop variable
                existingContainer = &containers[i]
                break
            }
        }
        if existingContainer != nil {
            break
        }
    }
    desiredTag := containerInfo["tag"]
    desiredHash := containerInfo["hash"]
    desiredRepo := containerInfo["repo"]
    desiredImage := fmt.Sprintf("%s:%s@sha256:%s", desiredRepo, desiredTag, desiredHash)
    desiredStatus := "running"
    if desiredTag == "" || desiredHash == "" {
        return containerState, fmt.Errorf("version info has not been retrieved")
    }
    // check if the desired image is available locally
    images, err := cli.ImageList(ctx, types.ImageListOptions{})
    if err != nil {
        return containerState, err
    }
    imageExistsLocally := false
    for _, img := range images {
        for _, tag := range img.RepoTags {
            if tag == desiredRepo+":"+desiredTag && img.ID == desiredHash {
                imageExistsLocally = true
                break
            }
        }
        if imageExistsLocally {
            break
        }
    }
    if !imageExistsLocally {
        // pull the image if it doesn't exist locally; the pull only
        // completes once the response body has been fully consumed
        pullResp, err := cli.ImagePull(ctx, desiredImage, types.ImagePullOptions{})
        if err != nil {
            return containerState, err
        }
        _, err = io.Copy(io.Discard, pullResp)
        pullResp.Close()
        if err != nil {
            return containerState, err
        }
    }
    switch {
    case existingContainer == nil:
        // if the container does not exist, create and start it
        _, err := cli.ContainerCreate(ctx, &container.Config{
            Image: desiredImage,
        }, nil, nil, nil, containerName)
        if err != nil {
            return containerState, err
        }
        if err := cli.ContainerStart(ctx, containerName, types.ContainerStartOptions{}); err != nil {
            return containerState, err
        }
        logger.Info(fmt.Sprintf("%s started with image %s", containerName, desiredImage))
    case existingContainer.State == "exited":
        // if the container exists but is stopped, start it
        if err := cli.ContainerStart(ctx, containerName, types.ContainerStartOptions{}); err != nil {
            return containerState, err
        }
        logger.Info(fmt.Sprintf("Started stopped container %s", containerName))
    default:
        // debug: log current vs desired image info
        currentImage := existingContainer.Image             // eg "repo:tag@sha256:digest"
        tagAndDigest := strings.Split(currentImage, ":")[1] // eg "tag@sha256"
        currentTag := strings.Split(tagAndDigest, "@")[0]   // eg "tag"
        logger.Info(fmt.Sprintf("Current image: %s, Current tag: %s, Desired tag: %s, Current ID: %s, Desired ID: %s",
            currentImage, currentTag, desiredTag, existingContainer.ImageID, desiredHash))
        // if the container is running, check the image hash
        if existingContainer.ImageID != desiredHash {
            // if the hashes don't match, recreate the container with the new image
            if err := cli.ContainerRemove(ctx, containerName, types.ContainerRemoveOptions{Force: true}); err != nil {
                return containerState, err
            }
            _, err = cli.ContainerCreate(ctx, &container.Config{
                Image: desiredImage,
            }, nil, nil, nil, containerName)
            if err != nil {
                return containerState, err
            }
            if err := cli.ContainerStart(ctx, containerName, types.ContainerStartOptions{}); err != nil {
                return containerState, err
            }
            logger.Info(fmt.Sprintf("Restarted %s with image %s", containerName, desiredImage))
        } else {
            logger.Info(fmt.Sprintf("%s is already running with the correct tag: %s", containerName, desiredTag))
        }
    }
    containerDetails, err := cli.ContainerInspect(ctx, containerName)
    if err != nil {
        return containerState, fmt.Errorf("failed to inspect container %s: %v", containerName, err)
    }
    containerState = structs.ContainerState{
        ID:            containerDetails.ID,           // container id hash
        Name:          containerName,                 // name (eg @p)
        Image:         desiredImage,                  // full repo:tag@hash string
        Type:          containerType,                 // eg `vere` (corresponds with version server label)
        DesiredStatus: desiredStatus,                 // what the user sets
        ActualStatus:  containerDetails.State.Status, // what the daemon reports
        CreatedAt:     containerDetails.Created,      // this is a string
    }
    return containerState, nil
}
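
// Illustrative call (the ship name is an example only):
//
//  state, err := StartContainer("sampel-palnet", "vere")
//  // state.ActualStatus reflects what the Docker daemon reports after the call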

// convert the version info back into json and then a generic map
// so we can easily get the correct repo/release channel/tag/hash
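// For reference, each per-container entry is expected to look roughly like this
// (illustrative values, with "amd64" as the architecture):
//
//  "vere": {
//      "repo": "registry.example.com/vere",
//      "tag":  "latest",
//      "amd64_sha256": "<image digest>"
//  }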
func GetLatestContainerInfo(containerType string) (map[string]string, error) {
    var res map[string]string
    arch := config.Architecture
    hashLabel := arch + "_sha256"
    versionInfo := config.VersionInfo
    jsonData, err := json.Marshal(versionInfo)
    if err != nil {
        return res, err
    }
    // convert JSON to a generic map
    var m map[string]interface{}
    err = json.Unmarshal(jsonData, &m)
    if err != nil {
        return res, err
    }
    containerData, ok := m[containerType].(map[string]interface{})
    if !ok {
        return nil, fmt.Errorf("%s data is not a map", containerType)
    }
    tag, ok := containerData["tag"].(string)
    if !ok {
        return nil, fmt.Errorf("'tag' is not a string")
    }
    hashValue, ok := containerData[hashLabel].(string)
    if !ok {
        return nil, fmt.Errorf("'%s' is not a string", hashLabel)
    }
    repo, ok := containerData["repo"].(string)
    if !ok {
        return nil, fmt.Errorf("'repo' is not a string")
    }
    res = make(map[string]string)
    res["tag"] = tag
    res["hash"] = hashValue
    res["repo"] = repo
    return res, nil
}

// stop a container by name
func StopContainerByName(containerName string) error {
    ctx := context.Background()
    cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    if err != nil {
        return err
    }
    defer cli.Close()
    // fetch all containers, including stopped ones
    containers, err := cli.ContainerList(ctx, types.ContainerListOptions{All: true})
    if err != nil {
        return err
    }
    for _, cont := range containers {
        for _, name := range cont.Names {
            if name == "/"+containerName {
                // stop the container
                if err := cli.ContainerStop(ctx, cont.ID, container.StopOptions{}); err != nil {
                    return fmt.Errorf("failed to stop container %s: %v", containerName, err)
                }
                logger.Info(fmt.Sprintf("Successfully stopped container %s", containerName))
                return nil
            }
        }
    }
    return fmt.Errorf("container with name %s not found", containerName)
}

// subscribe to docker events and feed them into the EventBus
func DockerListener() {
    ctx := context.Background()
    cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    if err != nil {
        logger.Error(fmt.Sprintf("Error initializing Docker client: %v", err))
        return
    }
    defer cli.Close()
    messages, errs := cli.Events(ctx, types.EventsOptions{})
    for {
        select {
        case event := <-messages:
            // convert the Docker event to our custom event and send it to the EventBus
            EventBus <- structs.Event{Type: event.Action, Data: event}
        case err := <-errs:
            // the event stream is broken; bail out instead of spinning on a dead channel
            logger.Error(fmt.Sprintf("Docker event error: %v", err))
            return
        }
    }
}
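
// Illustrative wiring (the consumer loop below is hypothetical, not part of this file):
//
//  go DockerListener()
//  go DockerPoller()
//  for ev := range EventBus {
//      // react to container events here
//  }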

// periodically poll docker in case we miss something
// (currently a stub: it logs once after the first tick and returns)
func DockerPoller() {
    ticker := time.NewTicker(10 * time.Second)
    defer ticker.Stop()
    for range ticker.C {
        logger.Info("polling docker")
        // fetch the status of all containers and compare with the app's state;
        // if there's a change, send an event to the EventBus
        return
    }
}