// Copyright © 2021 sealos.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
   "fmt"
   "strings"
   "github.com/spf13/cobra"
   "github.com/labring/sealos/pkg/apply"
   "github.com/labring/sealos/pkg/apply/processor"
   "github.com/labring/sealos/pkg/buildah"
   "github.com/labring/sealos/pkg/utils/iputils"
   "github.com/labring/sealos/pkg/utils/logger"
)
// exampleRun is the `--help` example text for the run command; it is user-visible
// output, so its contents (including spacing) are left exactly as-is.
var exampleRun = `
create cluster to your baremetal server, appoint the iplist:
   sealos run labring/kubernetes:v1.24.0 --masters 192.168.0.2,192.168.0.3,192.168.0.4 \
      --nodes 192.168.0.5,192.168.0.6,192.168.0.7 --passwd xxx
  multi image:
    sealos run labring/kubernetes:v1.24.0 calico:v3.24.1 \
        --masters 192.168.64.2,192.168.64.22,192.168.64.20 --nodes 192.168.64.21,192.168.64.19
  Specify server InfraSSH port :
  All servers use the same InfraSSH port (default port: 22):
   sealos run labring/kubernetes:v1.24.0 --masters 192.168.0.2,192.168.0.3,192.168.0.4 \
   --nodes 192.168.0.5,192.168.0.6,192.168.0.7 --port 24 --passwd xxx
  Different InfraSSH port numbers exist:
   sealos run labring/kubernetes:v1.24.0 --masters 192.168.0.2,192.168.0.3:23,192.168.0.4:24 \
   --nodes 192.168.0.5:25,192.168.0.6:25,192.168.0.7:27 --passwd xxx
  
  Custom VIP kubernetes cluster:
    sealos run -e defaultVIP=10.103.97.2 labring/kubernetes:v1.24.0 --masters 192.168.0.2,192.168.0.3,192.168.0.4 \
   --nodes 192.168.0.5,192.168.0.6,192.168.0.7 --passwd xxx
  
  Single kubernetes cluster:
   sealos run labring/kubernetes:v1.24.0 --single
  
create a cluster with custom environment variables:
   sealos run -e DashBoardPort=8443 mydashboard:latest  --masters 192.168.0.2,192.168.0.3,192.168.0.4 \
   --nodes 192.168.0.5,192.168.0.6,192.168.0.7 --passwd xxx
`
// newRunCmd builds the `sealos run` command, which applies one or more
// cluster images to a set of hosts (or to the local host in --single mode).
func newRunCmd() *cobra.Command {
	runArgs := &apply.RunArgs{
		Cluster: &apply.Cluster{},
		SSH:     &apply.SSH{},
	}
	var runSingle bool
	var transport string
	var runCmd = &cobra.Command{
		Use:     "run",
		Short:   "Run cloud native applications with ease, with or without an existing cluster",
		Long:    `sealos run labring/kubernetes:v1.24.0 --masters [arg] --nodes [arg]`,
		Example: exampleRun,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Single mode: target the local host, using its first local
			// address as the sole master.
			if runSingle {
				addr, _ := iputils.ListLocalHostAddrs()
				runArgs.Masters = iputils.LocalIP(addr)
			}
			// Resolve CLI args to image references, loading tar archives if needed.
			images, err := args2Images(args, transport)
			if err != nil {
				return err
			}
			applier, err := apply.NewApplierFromArgs(images, runArgs)
			if err != nil {
				return err
			}
			return applier.Apply()
		},
		PreRunE: func(cmd *cobra.Command, args []string) error {
			// Reject an invalid --transport value before doing any work.
			return buildah.ValidateTransport(transport)
		},
		PostRun: func(cmd *cobra.Command, args []string) {
			logger.Info(getContact())
		},
	}
	runArgs.RegisterFlags(runCmd.Flags())
	runCmd.Flags().BoolVar(&runSingle, "single", false, "run cluster in single mode")
	runCmd.Flags().BoolVarP(&processor.ForceOverride, "force", "f", false, "force override app in this cluster")
	runCmd.Flags().StringVarP(&transport, "transport", "t", buildah.OCIArchive,
		fmt.Sprintf("load image transport from tar archive file.(optional value: %s, %s)", buildah.OCIArchive, buildah.DockerArchive))
	return runCmd
}
// init registers the run command on the root command at package load time.
func init() {
	rootCmd.AddCommand(newRunCmd())
}
// args2Images resolves command-line arguments to image references.
// Arguments ending in .tar or .gz are treated as image archives and are
// loaded via buildah with the given transport, yielding the loaded image
// ID; every other argument is passed through unchanged as an image name.
func args2Images(args []string, transport string) ([]string, error) {
	bder, err := buildah.New("")
	if err != nil {
		return nil, err
	}
	images := make([]string, 0, len(args))
	for _, arg := range args {
		if strings.HasSuffix(arg, ".tar") || strings.HasSuffix(arg, ".gz") {
			// Load the archive into local storage and record its image ID.
			id, err := bder.Load(arg, transport)
			if err != nil {
				return nil, err
			}
			images = append(images, id)
			continue
		}
		images = append(images, arg)
	}
	return images, nil
}
// Copyright © 2021 Alibaba Group Holding Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package apply
import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"strconv"

	"github.com/labring/sealos/pkg/apply/applydrivers"
	"github.com/labring/sealos/pkg/clusterfile"
	"github.com/labring/sealos/pkg/constants"
	"github.com/labring/sealos/pkg/runtime"
	"github.com/labring/sealos/pkg/ssh"
	v2 "github.com/labring/sealos/pkg/types/v1beta1"
	"github.com/labring/sealos/pkg/utils/iputils"
	"github.com/labring/sealos/pkg/utils/logger"
	stringsutil "github.com/labring/sealos/pkg/utils/strings"
)
// ClusterArgs accumulates the cluster object and host list being built
// from command-line arguments.
type ClusterArgs struct {
	cluster     *v2.Cluster // cluster object being constructed or mutated
	hosts       []v2.Host   // hosts grouped by role and SSH port
	clusterName string      // name of the target cluster
}
// NewClusterFromArgs builds the set of default objects for a brand-new
// cluster: the cluster itself plus the default kubeadm configuration
// documents, ready to be serialized into a Clusterfile.
func NewClusterFromArgs(imageName []string, args *RunArgs) ([]interface{}, error) {
	c := &ClusterArgs{
		clusterName: args.ClusterName,
		cluster:     initCluster(args.ClusterName),
	}
	if err := c.SetClusterRunArgs(imageName, args); err != nil {
		return nil, err
	}
	kubeadmCfg := &runtime.KubeadmConfig{}
	if err := kubeadmCfg.Merge(""); err != nil {
		return nil, err
	}
	// todo: only generate configurations of the corresponding components by passing parameters
	objects := []interface{}{
		c.cluster,
		kubeadmCfg.InitConfiguration,
		kubeadmCfg.ClusterConfiguration,
		kubeadmCfg.JoinConfiguration,
		kubeadmCfg.KubeProxyConfiguration,
		kubeadmCfg.KubeletConfiguration,
	}
	return objects, nil
}
// NewApplierFromArgs constructs an applier from the command-line run
// arguments. It loads the stored Clusterfile for args.ClusterName when one
// exists (so repeated runs mutate a copy of the persisted cluster), or
// starts a fresh cluster otherwise.
func NewApplierFromArgs(imageName []string, args *RunArgs) (applydrivers.Interface, error) {
	clusterPath := constants.Clusterfile(args.ClusterName)
	cf := clusterfile.NewClusterFile(clusterPath,
		clusterfile.WithCustomConfigFiles(args.CustomConfigFiles),
		clusterfile.WithCustomEnvs(args.CustomEnv),
	)
	// A missing Clusterfile is expected on the first run; any other
	// processing error is fatal. errors.Is also matches a wrapped sentinel.
	if err := cf.Process(); err != nil && !errors.Is(err, clusterfile.ErrClusterFileNotExists) {
		return nil, err
	}
	cluster := cf.GetCluster()
	if cluster == nil {
		logger.Debug("creating new cluster")
		cluster = initCluster(args.ClusterName)
	} else {
		// Work on a copy so the cached cluster is not mutated in place.
		cluster = cluster.DeepCopy()
	}
	c := &ClusterArgs{
		clusterName: cluster.Name,
		cluster:     cluster,
	}
	if err := c.SetClusterRunArgs(imageName, args); err != nil {
		return nil, err
	}
	return applydrivers.NewDefaultApplier(c.cluster, cf, imageName)
}
// NewApplierFromFile constructs an applier from a Clusterfile on disk.
// The desired state comes from the given file; the current state comes from
// the locally stored Clusterfile for the same cluster name, if any.
func NewApplierFromFile(path string, args *Args) (applydrivers.Interface, error) {
	if !filepath.IsAbs(path) {
		cwd, err := os.Getwd()
		if err != nil {
			return nil, err
		}
		path = filepath.Join(cwd, path)
	}
	desiredCF := clusterfile.NewClusterFile(path,
		clusterfile.WithCustomValues(args.Values),
		clusterfile.WithCustomSets(args.Sets),
		clusterfile.WithCustomEnvs(args.CustomEnv),
		clusterfile.WithCustomConfigFiles(args.CustomConfigFiles),
	)
	if err := desiredCF.Process(); err != nil {
		return nil, err
	}
	cluster := desiredCF.GetCluster()
	// Guard against a file that parses but contains no usable cluster
	// (the nil check also prevents a panic on cluster.Name).
	if cluster == nil || cluster.Name == "" {
		return nil, fmt.Errorf("cluster name cannot be empty, make sure %s file is correct", path)
	}
	localPath := constants.Clusterfile(cluster.Name)
	currentCF := clusterfile.NewClusterFile(localPath)
	// A missing local Clusterfile simply means there is no current state.
	if err := currentCF.Process(); err != nil && !errors.Is(err, clusterfile.ErrClusterFileNotExists) {
		return nil, err
	}
	return &applydrivers.Applier{
		ClusterDesired: cluster,
		ClusterFile:    desiredCF,
		ClusterCurrent: currentCF.GetCluster(),
		RunNewImages:   nil,
	}, nil
}
// SetClusterRunArgs merges command-line run arguments into the cluster
// object. On first run (zero CreationTimestamp) it requires at least one
// image and one master; on subsequent runs it requires the previous apply
// to have succeeded. Persisted env/cmd/SSH values are only overwritten when
// the corresponding flag was explicitly set or no value is stored yet;
// finally, master and node hosts are appended grouped by SSH port.
func (r *ClusterArgs) SetClusterRunArgs(imageList []string, args *RunArgs) error {
	if args.Cluster.ClusterName == "" {
		return fmt.Errorf("cluster name can not be empty")
	}
	// First run: the cluster object has never been persisted.
	if r.cluster.CreationTimestamp.IsZero() {
		if len(imageList) == 0 {
			return fmt.Errorf("image can not be empty")
		}
		if len(args.Cluster.Masters) == 0 {
			return fmt.Errorf("master ip(s) must specified")
		}
	} else {
		// Subsequent runs may only proceed on a healthy cluster.
		if r.cluster.Status.Phase != v2.ClusterSuccess {
			return fmt.Errorf("cluster status is not %s", v2.ClusterSuccess)
		}
	}
	if err := PreProcessIPList(args.Cluster); err != nil {
		return err
	}
	// Override stored values only when the flag was explicitly changed or
	// the stored value is empty; args.fs carries the parsed flag set.
	if args.fs != nil {
		if args.fs.Changed("env") || len(r.cluster.Spec.Env) == 0 {
			r.cluster.Spec.Env = make([]string, len(args.CustomEnv))
			copy(r.cluster.Spec.Env, args.CustomEnv)
		}
		if args.fs.Changed("cmd") || len(r.cluster.Spec.Command) == 0 {
			r.cluster.Spec.Command = make([]string, len(args.CustomCMD))
			copy(r.cluster.Spec.Command, args.CustomCMD)
		}
		if args.fs.Changed("user") || r.cluster.Spec.SSH.User == "" {
			r.cluster.Spec.SSH.User = args.SSH.User
		}
		if args.fs.Changed("pk") || r.cluster.Spec.SSH.Pk == "" {
			r.cluster.Spec.SSH.Pk = args.SSH.Pk
		}
		if args.fs.Changed("pk-passwd") || r.cluster.Spec.SSH.PkPasswd == "" {
			r.cluster.Spec.SSH.PkPasswd = args.SSH.PkPassword
		}
		if args.fs.Changed("port") || r.cluster.Spec.SSH.Port == 0 {
			r.cluster.Spec.SSH.Port = args.SSH.Port
		}
		if args.fs.Changed("passwd") || r.cluster.Spec.SSH.Passwd == "" {
			r.cluster.Spec.SSH.Passwd = args.SSH.Password
		}
	}
	r.cluster.SetNewImages(imageList)
	masters := stringsutil.SplitRemoveEmpty(args.Cluster.Masters, ",")
	nodes := stringsutil.SplitRemoveEmpty(args.Cluster.Nodes, ",")
	r.hosts = []v2.Host{}
	clusterSSH := r.cluster.GetSSH()
	sshClient := ssh.NewSSHClient(&clusterSSH, true)
	// The arch of the first master/node is probed over SSH and assumed to
	// apply to the whole group — NOTE(review): mixed-arch groups would be
	// mislabeled; confirm this assumption holds for callers.
	if len(masters) > 0 {
		r.setHostWithIpsPort(masters, []string{v2.MASTER, GetHostArch(sshClient, masters[0])})
	}
	if len(nodes) > 0 {
		r.setHostWithIpsPort(nodes, []string{v2.NODE, GetHostArch(sshClient, nodes[0])})
	}
	r.cluster.Spec.Hosts = append(r.cluster.Spec.Hosts, r.hosts...)
	logger.Debug("cluster info: %v", r.cluster)
	return nil
}
// SetClusterResetArgs merges command-line reset arguments into the cluster
// object. SSH fields are only overwritten when the flag was explicitly set
// or the stored value is empty. When masters are given, Spec.Hosts is
// REPLACED (unlike run, which appends) with the provided masters/nodes.
func (r *ClusterArgs) SetClusterResetArgs(args *ResetArgs) error {
	if args.Cluster.ClusterName == "" {
		return fmt.Errorf("cluster name can not be empty")
	}
	if err := PreProcessIPList(args.Cluster); err != nil {
		return err
	}
	// Override stored SSH values only when the flag was explicitly changed
	// or the stored value is empty.
	if args.fs != nil {
		if args.fs.Changed("user") || r.cluster.Spec.SSH.User == "" {
			r.cluster.Spec.SSH.User = args.SSH.User
		}
		if args.fs.Changed("pk") || r.cluster.Spec.SSH.Pk == "" {
			r.cluster.Spec.SSH.Pk = args.SSH.Pk
		}
		if args.fs.Changed("pk-passwd") || r.cluster.Spec.SSH.PkPasswd == "" {
			r.cluster.Spec.SSH.PkPasswd = args.SSH.PkPassword
		}
		if args.fs.Changed("port") || r.cluster.Spec.SSH.Port == 0 {
			r.cluster.Spec.SSH.Port = args.SSH.Port
		}
		if args.fs.Changed("passwd") || r.cluster.Spec.SSH.Passwd == "" {
			r.cluster.Spec.SSH.Passwd = args.SSH.Password
		}
	}
	if len(args.Cluster.Masters) > 0 {
		masters := stringsutil.SplitRemoveEmpty(args.Cluster.Masters, ",")
		nodes := stringsutil.SplitRemoveEmpty(args.Cluster.Nodes, ",")
		r.hosts = []v2.Host{}
		clusterSSH := r.cluster.GetSSH()
		sshClient := ssh.NewSSHClient(&clusterSSH, true)
		// Arch is probed from the first host of each group over SSH.
		r.setHostWithIpsPort(masters, []string{v2.MASTER, GetHostArch(sshClient, masters[0])})
		if len(nodes) > 0 {
			r.setHostWithIpsPort(nodes, []string{v2.NODE, GetHostArch(sshClient, nodes[0])})
		}
		r.cluster.Spec.Hosts = r.hosts
	}
	logger.Debug("cluster info: %v", r.cluster)
	return nil
}
// setHostWithIpsPort groups the given ip[:port] strings into v2.Host
// entries keyed by SSH port and appends them to r.hosts. Sockets already
// present in the cluster are skipped. The group containing the first
// master is always placed at the front of r.hosts; remaining groups are
// appended in sorted port order so the result is deterministic (the
// original map iteration produced a random order across runs).
func (r *ClusterArgs) setHostWithIpsPort(ips []string, roles []string) {
	// Guard: callers pass non-empty lists, but an empty list would
	// otherwise panic at ips[0] below.
	if len(ips) == 0 {
		return
	}
	defaultPort := strconv.Itoa(int(r.cluster.Spec.SSH.Port))
	hostMap := map[string]*v2.Host{}
	for i := range ips {
		ip, port := iputils.GetHostIPAndPortOrDefault(ips[i], defaultPort)
		socket := fmt.Sprintf("%s:%s", ip, port)
		// Skip hosts that are already part of the cluster.
		if stringsutil.In(socket, r.cluster.GetAllIPS()) {
			continue
		}
		if host, ok := hostMap[port]; ok {
			host.IPS = append(host.IPS, socket)
		} else {
			hostMap[port] = &v2.Host{IPS: []string{socket}, Roles: roles}
		}
	}
	_, master0Port := iputils.GetHostIPAndPortOrDefault(ips[0], defaultPort)
	// Iterate ports in sorted order for a stable, reproducible host list.
	ports := make([]string, 0, len(hostMap))
	for port := range hostMap {
		ports = append(ports, port)
	}
	sort.Strings(ports)
	for _, port := range ports {
		host := hostMap[port]
		host.IPS = removeIPListDuplicatesAndEmpty(host.IPS)
		// The first master's group must stay first so it remains master0.
		if port == master0Port && stringsutil.InList(v2.Master, roles) {
			r.hosts = append([]v2.Host{*host}, r.hosts...)
			continue
		}
		r.hosts = append(r.hosts, *host)
	}
}
/*
Copyright 2022 cuisongliu@qq.com.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apply
import (
   "fmt"
   "path"
   "github.com/spf13/pflag"
   "github.com/labring/sealos/pkg/constants"
)
// Cluster holds the host-selection flags shared by cluster-mutating commands.
type Cluster struct {
	Masters     string // comma-separated master addresses, each optionally host:port
	Nodes       string // comma-separated node addresses, each optionally host:port
	ClusterName string // name of the cluster to operate on
}
// RegisterFlags binds the cluster-selection flags onto fs. verb and action
// are spliced into the per-command help text (e.g. "masters to run with").
func (c *Cluster) RegisterFlags(fs *pflag.FlagSet, verb, action string) {
	fs.StringVar(&c.Masters, "masters", "", fmt.Sprintf("masters to %s", verb))
	fs.StringVar(&c.Nodes, "nodes", "", fmt.Sprintf("nodes to %s", verb))
	// Fixed help-string grammar ("name of cluster to applied %s action").
	fs.StringVar(&c.ClusterName, "cluster", "default", fmt.Sprintf("name of the cluster to apply the %s action to", action))
}
// SSH holds the SSH credential flags used to reach cluster hosts.
type SSH struct {
	User       string // login user name
	Password   string // password for password authentication
	Pk         string // path to the private key file
	PkPassword string // passphrase for the private key
	Port       uint16 // SSH port on the remote hosts
}
// RegisterFlags binds the SSH authentication flags onto fs.
func (s *SSH) RegisterFlags(fs *pflag.FlagSet) {
	// Default identity file is ~/.ssh/id_rsa.
	defaultPk := path.Join(constants.GetHomeDir(), ".ssh", "id_rsa")
	fs.StringVarP(&s.User, "user", "u", "", "username to authenticate as")
	fs.StringVarP(&s.Password, "passwd", "p", "", "use given password to authenticate with")
	fs.StringVarP(&s.Pk, "pk", "i", defaultPk,
		"selects a file from which the identity (private key) for public key authentication is read")
	fs.StringVar(&s.PkPassword, "pk-passwd", "", "passphrase for decrypting a PEM encoded private key")
	fs.Uint16Var(&s.Port, "port", 22, "port to connect to on the remote host")
}
// RunArgs collects every flag accepted by the `run` command.
type RunArgs struct {
	*Cluster
	*SSH
	CustomEnv         []string // --env values applied during execution
	CustomCMD         []string // --cmd values overriding image CMD
	CustomConfigFiles []string // --config-file paths
	fs                *pflag.FlagSet // flag set retained to query Changed() later
}
// RegisterFlags binds all run-related flags onto fs and retains the flag
// set so later code can check which flags were explicitly set.
func (arg *RunArgs) RegisterFlags(fs *pflag.FlagSet) {
	arg.fs = fs
	arg.Cluster.RegisterFlags(fs, "run with", "run")
	arg.SSH.RegisterFlags(fs)
	fs.StringSliceVarP(&arg.CustomEnv, "env", "e", []string{}, "environment variables to set during command execution")
	fs.StringSliceVar(&arg.CustomCMD, "cmd", []string{}, "override CMD directive in images")
	fs.StringSliceVar(&arg.CustomConfigFiles, "config-file", []string{}, "path of custom config files, to use to replace the resource")
}
// Args collects the flags for Clusterfile-driven commands (e.g. apply).
type Args struct {
	Values            []string // --values files merged into the Clusterfile
	Sets              []string // --set key=value overrides
	CustomEnv         []string // --env values applied during execution
	CustomConfigFiles []string // --config-file paths
}
// RegisterFlags binds the Clusterfile customization flags onto fs.
func (arg *Args) RegisterFlags(fs *pflag.FlagSet) {
	// Table-driven registration: every flag is a string slice with an
	// empty default, so only target/name/usage vary.
	for _, f := range []struct {
		target *[]string
		name   string
		usage  string
	}{
		{&arg.Values, "values", "values file to apply into Clusterfile"},
		{&arg.Sets, "set", "set values on the command line"},
		{&arg.CustomEnv, "env", "environment variables to set during command execution"},
		{&arg.CustomConfigFiles, "config-file", "path of custom config files, to use to replace the resource"},
	} {
		fs.StringSliceVar(f.target, f.name, []string{}, f.usage)
	}
}
// ResetArgs collects every flag accepted by the `reset` command.
type ResetArgs struct {
	*Cluster
	*SSH
	fs *pflag.FlagSet // flag set retained to query Changed() later
}
// RegisterFlags binds the reset-related flags onto fs and retains the flag
// set so later code can check which flags were explicitly set.
func (arg *ResetArgs) RegisterFlags(fs *pflag.FlagSet) {
	arg.fs = fs
	arg.Cluster.RegisterFlags(fs, "be reset", "reset")
	arg.SSH.RegisterFlags(fs)
}
// ScaleArgs collects the flags for scale operations (add/delete nodes).
type ScaleArgs struct {
	*Cluster
}
// RegisterFlags binds the cluster-selection flags for a scale operation,
// delegating to the embedded Cluster with the caller's verb/action wording.
func (arg *ScaleArgs) RegisterFlags(fs *pflag.FlagSet, verb, action string) {
	arg.Cluster.RegisterFlags(fs, verb, action)
}