diff --git a/config/config.go b/config/config.go index 4006668..02c1a49 100644 --- a/config/config.go +++ b/config/config.go @@ -1,22 +1,18 @@ package config import ( - "errors" "fmt" - "github.com/alecthomas/gometalinter/_linters/src/gopkg.in/yaml.v2" - "io/ioutil" - "os/user" - "path/filepath" - "github.com/aau-network-security/sandbox/virtual/docker" + "github.com/alecthomas/gometalinter/_linters/src/gopkg.in/yaml.v2" dockerclient "github.com/fsouza/go-dockerclient" "github.com/rs/zerolog/log" + "io/ioutil" ) type Config struct { - VmConfig VmConfig `yaml:"vm-config"` - WireguardService WgConnConf `yaml:"wireguard-service"` - DefatConfig DefattConf `yaml:"defat-config"` + VmConfig VmConfig `yaml:"vm-config"` + //WireguardService WgConnConf `yaml:"wireguard-service"` + //DefatConfig DefattConf `yaml:"defat-config"` DockerRepositories []dockerclient.AuthConfiguration `yaml:"docker-repositories"` } @@ -25,30 +21,30 @@ type VmConfig struct { //OvaDir string `yaml:"ova-test"` //use for local test } -type DefattConf struct { - Endpoint string `yaml:"endpoint"` - Port uint64 `yaml:"port"` - SigningKey string `yaml:"sign-key"` - UsersFile string `yaml:"users-file"` - CertConf CertificateConfig `yaml:"tls"` -} +//type DefattConf struct { +// Endpoint string `yaml:"endpoint"` +// Port uint64 `yaml:"port"` +// SigningKey string `yaml:"sign-key"` +// UsersFile string `yaml:"users-file"` +// CertConf CertificateConfig `yaml:"tls"` +//} -type WgConnConf struct { - Endpoint string `yaml:"endpoint"` - Port uint64 `yaml:"port"` - AuthKey string `yaml:"auth-key"` - SignKey string `yaml:"sign-key"` - Dir string `yaml:"client-conf-dir"` - CertConf CertificateConfig `yaml:"tls"` -} - -type CertificateConfig struct { - Enabled bool `yaml:"enabled"` - Directory string `yaml:"directory"` - CertFile string `yaml:"certfile"` - CertKey string `yaml:"certkey"` - CAFile string `yaml:"cafile"` -} +//type WgConnConf struct { +// Endpoint string `yaml:"endpoint"` +// Port uint64 `yaml:"port"` +// AuthKey string `yaml:"auth-key"` +// SignKey string `yaml:"sign-key"` +// Dir string `yaml:"client-conf-dir"` +// CertConf CertificateConfig `yaml:"tls"` +//} +// +//type CertificateConfig struct { +// Enabled bool `yaml:"enabled"` +// Directory string `yaml:"directory"` +// CertFile string `yaml:"certfile"` +// CertKey string `yaml:"certkey"` +// CAFile string `yaml:"cafile"` +//} func NewConfig(path string) (*Config, error) { f, err := ioutil.ReadFile(path) @@ -70,16 +66,16 @@ func NewConfig(path string) (*Config, error) { if c.VmConfig.OvaDir == "" { return nil, fmt.Errorf("Specify vm directory, err: %v", err) } - - if c.WireguardService.CertConf.Enabled { - if c.WireguardService.CertConf.Directory == "" { - usr, err := user.Current() - if err != nil { - return nil, errors.New("Invalid user") - } - c.WireguardService.CertConf.Directory = filepath.Join(usr.HomeDir, ".local", "share", "certmagic") - } - } + // + //if c.WireguardService.CertConf.Enabled { + // if c.WireguardService.CertConf.Directory == "" { + // usr, err := user.Current() + // if err != nil { + // return nil, errors.New("Invalid user") + // } + // c.WireguardService.CertConf.Directory = filepath.Join(usr.HomeDir, ".local", "share", "certmagic") + // } + //} return &c, nil } diff --git a/config/config.yml b/config/config.yml index ee95873..780cf65 100644 --- a/config/config.yml +++ b/config/config.yml @@ -1,28 +1,28 @@ vm-config: - ova-dir: /home/vlad/frondends + ova-dir: /home/vlad/front # ova-test: /Users/rvm/Downloads/AAUJOB/NAP/2021/defatt/virtual/vbox/ 
-wireguard-service: - endpoint: localhost - port: 5353 - auth-key: deneme - sign-key: test - client-conf-dir: /etc/wireguard/ - tls: - enabled: false - certfile: - certkey: - cafile: +#wireguard-service: +# endpoint: localhost +# port: 5353 +# auth-key: deneme +# sign-key: test +# client-conf-dir: /etc/wireguard/ +# tls: +# enabled: false +# certfile: +# certkey: +# cafile: -defat-config: - endpoint: localhost - port: 5454 - tls: - enabled: false - certFile: - certKey: - caFile: - sign-key: - users-file: /Users/rvm/Downloads/AAUJOB/NAP/2021/defatt/config/users.yml +#defat-config: +# endpoint: localhost +# port: 5454 +# tls: +# enabled: false +# certFile: +# certKey: +# caFile: +# sign-key: +# users-file: /Users/rvm/Downloads/AAUJOB/NAP/2021/defatt/config/users.yml docker-repositories: - username: hknd diff --git a/game/environment.go b/game/environment.go deleted file mode 100644 index 41be9f7..0000000 --- a/game/environment.go +++ /dev/null @@ -1,694 +0,0 @@ -package game - -import ( - "context" - "errors" - "fmt" - "github.com/aau-network-security/openvswitch/ovs" - "github.com/aau-network-security/sandbox/dnet/dns" - "github.com/aau-network-security/sandbox/models" - "strconv" - "strings" - "sync" - "time" - - "github.com/aau-network-security/sandbox/controller" - "github.com/aau-network-security/sandbox/dnet/dhcp" - dhproto "github.com/aau-network-security/sandbox/dnet/dhcp/proto" - "github.com/aau-network-security/sandbox/dnet/wg" - vpn "github.com/aau-network-security/sandbox/dnet/wg/proto" - //"github.com/aau-network-security/sandbox/models" - "github.com/aau-network-security/sandbox/store" - "github.com/aau-network-security/sandbox/virtual" - "github.com/aau-network-security/sandbox/virtual/docker" - "github.com/aau-network-security/sandbox/virtual/vbox" - "github.com/rs/zerolog/log" -) - -var ( - redListenPort uint = 5181 - blueListenPort uint = 5182 - min = 7900 - max = 7950 - gmin = 5350 - gmax = 5375 - smin = 3000 - smax = 3500 - rmin = 5000 - rmax = 5300 - - ErrVMNotCreated = errors.New("no VM created") - ErrGettingContainerID = errors.New("could not get container ID") -) - -type environment struct { - // challenge microservice should be integrated heres - controller controller.NetController - wg vpn.WireguardClient - dhcp dhproto.DHCPClient - dockerHost docker.Host - instances []virtual.Instance - ports []string - vlib vbox.Library - dnsServer *dns.Server -} - -type GameConfig struct { - ID string - Scenario store.Scenario - Name string - Tag string - WgConfig wg.WireGuardConfig - Host string - env *environment - NetworksIP map[string]string - redVPNIp string - blueVPNIp string - redPort uint - bluePort uint - CreatedAt time.Time - RedPanicLeft uint - BluePanicLeft uint -} - -type VPNConfig struct { - PeerIP string - PrivateKeyClient string - ServerPublicKey string - AllowedIPs string - Endpoint string -} - -func NewEnvironment(conf *GameConfig, vlib vbox.Library) (*GameConfig, error) { - - netController := controller.New() - netController.IPPool = controller.NewIPPoolFromHost() - - dockerHost := docker.NewHost() - - env := &environment{ - controller: *netController, - dockerHost: dockerHost, - vlib: vlib, - } - - conf.env = env - - log.Info().Msgf("New environment initialized ") - return conf, nil -} - -func (gc *GameConfig) CloseGame(ctx context.Context) error { - var waitg sync.WaitGroup - var failed bool - - log.Info().Str("Game Name", gc.Name).Str("Game Tag", gc.Tag).Msg("Stopping game") - for _, instance := range gc.env.instances { - waitg.Add(1) - go func(vi 
virtual.Instance) { - defer waitg.Done() - if err := vi.Stop(); err != nil { - log.Error().Str("Instance Type", vi.Info().Type).Str("Instance Name", vi.Info().Id).Msg("failed to stop virtual instance") - failed = true - } - log.Debug().Str("Instance Type", vi.Info().Type).Str("Instance Name", vi.Info().Id).Msg("stopped instance") - if err := vi.Close(); err != nil { - log.Error().Str("Instance Type", vi.Info().Type).Str("Instance Name", vi.Info().Id).Msg("failed to close virtual instance") - failed = true - } - - if vi.Info().Type == "docker" { - if err := gc.env.controller.Ovs.Docker.DeletePorts(gc.Tag, vi.Info().Id); err != nil { - log.Error().Str("Instance Name", vi.Info().Id).Msg("Deleted all ports on docker image") - failed = true - } - } - log.Debug().Str("Instance Type", vi.Info().Type).Str("Instance Name", vi.Info().Id).Msg("closed instance") - }(instance) - - } - waitg.Wait() - if failed { - return errors.New("failed to stop an virtual instance") - } - - if err := gc.env.removeNetworks(gc.Tag); err != nil { - return errors.New("failed to remove networks") - } - - return nil -} - -func (gc *GameConfig) StartGame(ctx context.Context, tag, name string, scenario store.Scenario) error { - - var ipMail, ipDC string - - log.Info().Str("Game Tag", tag). - Str("Game Name", name). - Str("Scenario", scenario.Name). - Msg("starting game") - - log.Debug().Str("Game", name).Str("bridgeName", tag).Msg("creating openvswitch bridge") - if err := gc.env.initializeOVSBridge(tag); err != nil { - return err - } - - log.Debug().Str("Game", name).Int("Networks", len(scenario.Networks)).Msg("Creating networks") - if err := gc.env.createNetworks(tag, scenario.Networks); err != nil { - return err - } - - log.Debug().Str("Game", name).Msg("configuring monitoring") - if err := gc.env.configureMonitor(ctx, tag, scenario.Networks); err != nil { - log.Error().Err(err).Msgf("configuring monitoring") - return err - } - - var vlanPorts []string - for _, network := range scenario.Networks { - vlanPorts = append(vlanPorts, fmt.Sprintf("%s_%s", tag, network.Name)) - } - vlanPorts = append(vlanPorts, fmt.Sprintf("%s_monitoring", tag)) - - log.Debug().Str("Game", tag).Msgf("Initilizing VPN VM") - - //assign connection port to RED users - redTeamVPNPort := getRandomPort(min, max) - - // assign grpc port to wg vms - wgPort := getRandomPort(gmin, gmax) - - routerPort := getRandomPort(rmin, rmax) - - //assign connection port to Blue users - blueTeamVPNPort := getRandomPort(min, max) - - if err := gc.env.initWireguardVM(ctx, tag, vlanPorts, redTeamVPNPort, blueTeamVPNPort, wgPort, routerPort); err != nil { - - return err - } - - log.Debug().Str("Game", name).Msg("waiting for wireguard vm to boot") - - dhcpClient, err := dhcp.NewDHCPClient(ctx, gc.WgConfig, wgPort) - if err != nil { - log.Error().Err(err).Msg("connecting to DHCP service") - return err - } - - gc.env.dhcp = dhcpClient - - log.Debug().Str("Game ", name).Msg("starting DHCP server") - - gc.NetworksIP, err, ipMail, ipDC = gc.env.initDHCPServer(ctx, len(scenario.Networks), scenario) - if err != nil { - return err - } - - log.Debug().Str("Game ", name).Msg("starting DNS server") - - if err := gc.env.initDNSServer(ctx, tag, gc.NetworksIP, scenario, ipMail, ipDC); err != nil { - log.Error().Err(err).Msg("connecting to DHCP service") - return err - } - - wgClient, err := wg.NewGRPCVPNClient(ctx, gc.WgConfig, wgPort) - if err != nil { - log.Error().Err(err).Msg("connecting to wireguard service") - return err - } - gc.env.wg = wgClient - - log.Debug().Str("Game", 
name).Msg("initializing scenario") - if err := gc.env.initializeScenario(ctx, tag, scenario); err != nil { - return err - } - - ethInterfaceName := "eth0" // can be customized later - - redTeamVPNIp, err := gc.env.getRandomIp() - if err != nil { - log.Error().Err(err).Msg("Problem in generating red team VPNip") - return err - } - - gc.redVPNIp = fmt.Sprintf("%s.0/24", redTeamVPNIp) - //Assigning a connection port for Red team - - gc.redPort = redTeamVPNPort - - // create wireguard interface for red team - wgNICred := fmt.Sprintf("%s_red", tag) - - // initializing VPN endpoint for red team - if err := gc.env.initVPNInterface(gc.redVPNIp, redListenPort, wgNICred, ethInterfaceName); err != nil { - return err - } - - blueTeamVPNIp, err := gc.env.getRandomIp() - if err != nil { - log.Error().Err(err).Msg("") - return err - } - - gc.blueVPNIp = fmt.Sprintf("%s.0/24", blueTeamVPNIp) - - //Assigning a connection port for blue team - - gc.bluePort = blueTeamVPNPort - // initializing VPN endpoint for blue team - - //create wireguard interface for blue team - wgNICblue := fmt.Sprintf("%s_blue", tag) - - if err := gc.env.initVPNInterface(gc.blueVPNIp, blueListenPort, wgNICblue, ethInterfaceName); err != nil { - return err - } - - macAddress := "04:d3:b0:9b:ea:d6" - macAddressClean := strings.ReplaceAll(macAddress, ":", "") - - log.Debug().Str("game", tag).Msg("Initalizing SoC") - socPort := getRandomPort(smin, smax) - ifaces := []string{fmt.Sprintf("%s_monitoring", tag), fmt.Sprintf("%s_AllBlue", tag)} - if err := gc.env.initializeSOC(ctx, ifaces, macAddressClean, tag, 2, socPort); err != nil { - log.Error().Err(err).Str("game", tag).Msg("starting SoC vm") - return err - } - - log.Info().Str("Game Tag", tag). - Str("Game Name", name). - Msg("started game") - - return nil -} - -func (env *environment) initVPNInterface(ipAddress string, port uint, vpnInterfaceName, ethInterface string) error { - - // ipAddress should be in this format : "45.11.23.1/24" - // port should be unique per interface - - _, err := env.wg.InitializeI(context.Background(), &vpn.IReq{ - Address: ipAddress, - ListenPort: uint32(port), - SaveConfig: true, - Eth: ethInterface, - IName: vpnInterfaceName, - DownInterfacesFile: "/etc/network/downinterfaces", - }) - if err != nil { - log.Error().Msgf("Error in initializing interface %v", err) - return err - } - return nil -} - -func (env *environment) initDHCPServer(ctx context.Context, numberNetworks int, scenario store.Scenario) (map[string]string, error, string, string) { - var networks []*dhproto.Network - var staticHosts []*dhproto.StaticHost - var ipMail, ipDC string - - ipList := make(map[string]string) - - for i := 1; i <= numberNetworks; i++ { - var network dhproto.Network - randIP, _ := env.controller.IPPool.Get() - network.Network = randIP + ".0" - network.Min = randIP + ".6" - network.Max = randIP + ".250" - network.Router = randIP + ".1" - - ipList[fmt.Sprintf("%d", 10*i)] = randIP + ".0/24" - network.DnsServer = randIP + ".2" - networks = append(networks, &network) - - } - - // Setup monitoring network - - monitoringNet := dhproto.Network{ - Network: "10.10.10.0", - Min: "10.10.10.6", - Max: "10.10.10.199", - Router: "10.10.10.1", - DnsServer: "10.10.10.2", - } - ipList[""] = "10.10.10.0/24" - - networks = append(networks, &monitoringNet) - //Todo: This is scenario based method to make it work - // in future this needs to be scenario indenpent - - for _, item := range scenario.Hosts { - - //cast la string acum e lista de stringuri - if item.Name == "mailserver" { - - ipMail 
= ConstructStaticIP(ipList, item.Networks, item.IPAddr) - host := dhproto.StaticHost{ - Name: item.Name, - - MacAddress: "04:d3:04:54:fe:15", - Address: ipMail, - Router: ConstructStaticIP(ipList, item.Networks, ".1"), - DomainName: fmt.Sprintf("\"%s\"", item.DNS), - DnsServer: ConstructStaticIP(ipList, item.Networks, ".2"), - } - - staticHosts = append(staticHosts, &host) - continue - - } else if item.Name == "DCcon" { - fmt.Printf("Este in bucla cu DCcon \n") - - ipDC = ConstructStaticIP(ipList, item.Networks, item.IPAddr) - fmt.Printf("DCcon IP: %s\n", ipDC) - - host := dhproto.StaticHost{ - Name: item.Name, - MacAddress: "04:d3:b0:c7:57:c7", - Address: ipDC, - Router: ConstructStaticIP(ipList, item.Networks, ".1"), - DomainName: fmt.Sprintf("\"%s\"", item.DNS), - DnsServer: ConstructStaticIP(ipList, item.Networks, ".2"), - } - staticHosts = append(staticHosts, &host) - } else { - fmt.Printf("Este in bucla cu Else. \n") - continue - } - - } - - host := dhproto.StaticHost{ - Name: "SOC", - MacAddress: "04:d3:b0:9b:ea:d6", - Address: "10.10.10.200", - Router: "10.10.10.1", - DomainName: "\"blue.monitor.soc\"", - DnsServer: "10.10.10.2", - } - - staticHosts = append(staticHosts, &host) - - _, err := env.dhcp.StartDHCP(ctx, &dhproto.StartReq{Networks: networks, StaticHosts: staticHosts}) - if err != nil { - return ipList, err, ipMail, ipDC - } - - return ipList, nil, ipMail, ipDC -} - -func (env *environment) initDNSServer(ctx context.Context, bridge string, ipList map[string]string, scenario store.Scenario, IPMail, IPdc string) error { - - server, err := dns.New(bridge, ipList, scenario, IPMail, IPdc) - if err != nil { - log.Error().Msgf("Error creating DNS server %v", err) - return err - } - env.dnsServer = server - //env.instances = append(env.instances, server ) - - if err := server.Run(ctx); err != nil { - log.Error().Msgf("Error in starting DNS %v", err) - return err - } - - contID := server.Container().ID() - fmt.Printf("AICI e ID = %s\n", contID) - - i := 1 - for _, network := range ipList { - - if network == "10.10.10.0/24" { - - ipAddrs := strings.TrimSuffix(network, ".0/24") - ipAddrs = ipAddrs + ".2/24" - - fmt.Println(ipAddrs) - - if err := env.controller.Ovs.Docker.AddPort(bridge, fmt.Sprintf("eth%d", i), contID, ovs.DockerOptions{IPAddress: ipAddrs}); err != nil { - - log.Error().Err(err).Str("container", contID).Msg("adding port to DNS container") - return err - } - i++ - fmt.Println(i) - - } else { - ipAddrs := strings.TrimSuffix(network, ".0/24") - ipAddrs = ipAddrs + ".2/24" - - fmt.Println(ipAddrs) - //fmt.Sprintf("eth%d", vlan) - tag := i * 10 - - sTag := strconv.Itoa(tag) - - fmt.Println(sTag) - if err := env.controller.Ovs.Docker.AddPort(bridge, fmt.Sprintf("eth%d", i), contID, ovs.DockerOptions{VlanTag: sTag, IPAddress: ipAddrs}); err != nil { - - log.Error().Err(err).Str("container", contID).Msg("adding port to DNS container") - return err - } - i++ - fmt.Println(i) - - } - - } - - return nil -} - -//configureMonitor will configure the monitoring VM by attaching the correct interfaces -func (env *environment) configureMonitor(ctx context.Context, bridge string, nets []models.Network) error { - - log.Info().Str("game tag", bridge).Msg("creating monitoring network") - if err := env.createPort(bridge, "monitoring", 0); err != nil { - return err - } - - mirror := fmt.Sprintf("%s_mirror", bridge) - - log.Info().Str("game tag", bridge).Msg("Creating the network mirror") - if err := env.controller.Ovs.VSwitch.CreateMirrorforBridge(mirror, bridge); err != nil { - 
log.Error().Err(err).Msg("creating mirror") - return err - } - - if err := env.createPort(bridge, "AllBlue", 0); err != nil { - return err - } - - portUUID, err := env.controller.Ovs.VSwitch.GetPortUUID(fmt.Sprintf("%s_AllBlue", bridge)) - if err != nil { - log.Error().Err(err).Str("port", fmt.Sprintf("%s_AllBlue", bridge)).Msg("getting port uuid") - return err - } - - var vlans []string - for _, network := range nets { - vlans = append(vlans, fmt.Sprint(network.Tag)) - } - - if err := env.controller.Ovs.VSwitch.MirrorAllVlans(mirror, portUUID, vlans); err != nil { - log.Error().Err(err).Msgf("mirroring traffic") - return err - } - - return nil -} - -func (env *environment) initializeSOC(ctx context.Context, networks []string, mac string, tag string, nic int, socPort uint) error { - - //TODO: Add random port here | soc - - //TODO: Solve problem with the soc ovaFile - vm, err := env.vlib.GetCopy(ctx, tag, - vbox.InstanceConfig{Image: "soc2022.ova", - CPU: 4, - MemoryMB: 32384}, - vbox.MapVMPort([]virtual.NatPortSettings{ - { - HostPort: strconv.FormatUint(uint64(socPort), 10), - GuestPort: "22", - ServiceName: "sshd", - Protocol: "tcp", - }, - }), - // SetBridge parameter cleanFirst should be enabled when wireguard/router instance - // is attaching to openvswitch network - vbox.SetBridge(networks, false), - vbox.SetMAC(mac, nic), - ) - - if err != nil { - log.Error().Err(err).Msg("creating copy of SoC VM") - return err - } - if vm == nil { - return ErrVMNotCreated - } - log.Debug().Str("VM", vm.Info().Id).Msg("starting VM") - - if err := vm.Start(ctx); err != nil { - log.Error().Err(err).Msgf("starting virtual machine") - return err - } - env.instances = append(env.instances, vm) - - return nil -} - -func (env *environment) initWireguardVM(ctx context.Context, tag string, vlanPorts []string, redTeamVPNport, blueTeamVPNport, wgPort uint, routerPort uint) error { - - vm, err := env.vlib.GetCopy(ctx, - tag, - vbox.InstanceConfig{Image: "Routerfix.ova", - CPU: 2, - MemoryMB: 2048}, - vbox.MapVMPort([]virtual.NatPortSettings{ - { - // this is for gRPC service - HostPort: strconv.FormatUint(uint64(wgPort), 10), - GuestPort: "5353", - ServiceName: "wgservice", - Protocol: "tcp", - }, - { - HostPort: strconv.FormatUint(uint64(redTeamVPNport), 10), - GuestPort: strconv.FormatUint(uint64(redListenPort), 10), - ServiceName: "wgRedConnection", - Protocol: "udp", - }, - { - HostPort: strconv.FormatUint(uint64(blueTeamVPNport), 10), - GuestPort: strconv.FormatUint(uint64(blueListenPort), 10), - ServiceName: "wgBlueConnection", - Protocol: "udp", - }, - { - HostPort: strconv.FormatUint(uint64(routerPort), 10), - GuestPort: "22", - ServiceName: "sshd", - Protocol: "tcp", - }, - }), - // SetBridge parameter cleanFirst should be enabled when wireguard/router instance - // is attaching to openvswitch network - vbox.SetBridge(vlanPorts, false), - ) - - if err != nil { - log.Error().Err(err).Msg("creating VPN VM") - return err - } - if vm == nil { - return ErrVMNotCreated - } - log.Debug().Str("VM", vm.Info().Id).Msg("starting VM") - - if err := vm.Start(ctx); err != nil { - log.Error().Err(err).Msgf("starting virtual machine") - return err - } - env.instances = append(env.instances, vm) - - return nil -} - -func (gc *GameConfig) CreateVPNConfig(ctx context.Context, isRed bool, idUser string) (VPNConfig, error) { - - var nicName string - - var allowedIps []string - var peerIP string - var endpoint string - //var dns string - if isRed { - //dns = "" - nicName = fmt.Sprintf("%s_red", gc.Tag) - - for key := range 
gc.NetworksIP { - if gc.NetworksIP[key] == "10.10.10.0/24" { - continue - } - allowedIps = append(allowedIps, gc.NetworksIP[key]) - break - } - - peerIP = gc.redVPNIp - allowedIps = append(allowedIps, peerIP) - - endpoint = fmt.Sprintf("%s.%s:%d", gc.Tag, gc.Host, gc.redPort) - } else { - - nicName = fmt.Sprintf("%s_blue", gc.Tag) - for key := range gc.NetworksIP { - allowedIps = append(allowedIps, gc.NetworksIP[key]) - } - - peerIP = gc.blueVPNIp - allowedIps = append(allowedIps, peerIP) - endpoint = fmt.Sprintf("%s.%s:%d", gc.Tag, gc.Host, gc.bluePort) - } - - serverPubKey, err := gc.env.wg.GetPublicKey(ctx, &vpn.PubKeyReq{PubKeyName: nicName, PrivKeyName: nicName}) - if err != nil { - log.Error().Err(err).Str("User", idUser).Msg("Err get public nicName wireguard") - return VPNConfig{}, err - } - - _, err = gc.env.wg.GenPrivateKey(ctx, &vpn.PrivKeyReq{PrivateKeyName: gc.Tag + "_" + idUser + "_"}) - if err != nil { - //fmt.Printf("Err gen private nicName wireguard %v", err) - log.Error().Err(err).Str("User", idUser).Msg("Err gen private nicName wireguard") - return VPNConfig{}, err - } - - //generate client public nicName - //log.Info().Msgf("Generating public nicName for team %s", evTag+"_"+team+"_"+strconv.Itoa(ipAddr)) - _, err = gc.env.wg.GenPublicKey(ctx, &vpn.PubKeyReq{PubKeyName: gc.Tag + "_" + idUser + "_", PrivKeyName: gc.Tag + "_" + idUser + "_"}) - if err != nil { - log.Error().Err(err).Str("User", idUser).Msg("Err gen public nicName client") - return VPNConfig{}, err - } - - clientPubKey, err := gc.env.wg.GetPublicKey(ctx, &vpn.PubKeyReq{PubKeyName: gc.Tag + "_" + idUser + "_"}) - if err != nil { - fmt.Printf("Error on GetPublicKey %v", err) - return VPNConfig{}, err - } - - pIP := fmt.Sprintf("%d/32", IPcounter()) - - peerIP = strings.Replace(peerIP, "0/24", pIP, 1) - - _, err = gc.env.wg.AddPeer(ctx, &vpn.AddPReq{ - Nic: nicName, - AllowedIPs: peerIP, - PublicKey: clientPubKey.Message, - }) - - if err != nil { - log.Error().Err(err).Msg("Error on adding peer to interface") - return VPNConfig{}, err - - } - - clientPrivKey, err := gc.env.wg.GetPrivateKey(ctx, &vpn.PrivKeyReq{PrivateKeyName: gc.Tag + "_" + idUser + "_"}) - if err != nil { - log.Error().Err(err).Msg("getting priv NIC") - return VPNConfig{}, err - } - - return VPNConfig{ - ServerPublicKey: serverPubKey.Message, - PrivateKeyClient: clientPrivKey.Message, - Endpoint: endpoint, - AllowedIPs: strings.Join(allowedIps, ", "), - PeerIP: peerIP, - }, nil - -} diff --git a/main/main.go b/main/main.go index 2320ddb..b8ab3b0 100644 --- a/main/main.go +++ b/main/main.go @@ -1,67 +1,82 @@ package main import ( + "context" "flag" + "github.com/aau-network-security/sandbox/store" + + //"flag" "fmt" "github.com/aau-network-security/sandbox/config" "github.com/aau-network-security/sandbox/sandbox" + //"github.com/aau-network-security/sandbox2/models" + //"github.com/aau-network-security/sandbox2/sandbox2" + //"github.com/aau-network-security/sandbox2/store" + //"github.com/docker/docker/integration-cli/environment" + //"github.com/google/uuid" "github.com/rs/zerolog/log" "os" + //"time" ) func main() { + //TODO: add the target vm localtion and specific VM + //var ( + // tag string + // vmsName string + // targetVM string + // networksNo int + //) - var ( - tag string - vmsName string - targetVM string - networksNo int - ) - - flag.StringVar(&tag, "tag", "test", "name of experiment") - flag.StringVar(&vmsName, "vmsName", "ubuntu.ova", "name of the target machine") - flag.StringVar(&targetVM, "targetVM", "ubuntu.ova", "name for 
rest of virtual machines") - flag.IntVar(&networksNo, "networksNo", 3, "number of networks") - - flag.Parse() + // flag.StringVar(&tag, "tag", "test", "name of experiment") + // flag.StringVar(&vmsName, "vmsName", "ubuntu.ova", "name of the target machine") + // flag.StringVar(&targetVM, "targetVM", "ubuntu.ova", "name for rest of virtual machines") + // flag.IntVar(&networksNo, "networksNo", 3, "number of networks") + // + // flag.Parse() + // + // fmt.Println("tag:", tag) + // fmt.Println("networksNo:", networksNo) + // fmt.Println("vm:", vmsName) + // fmt.Println("vm:", targetVM) - fmt.Println("tag:", tag) - fmt.Println("networksNo:", networksNo) - fmt.Println("vm:", vmsName) - fmt.Println("vm:", targetVM) + var defaultScenarioFile = "scenarios" dir, err := os.Getwd() // get working directory if err != nil { log.Error().Msgf("Error getting the working dir %v", err) } fullPathToConfig := fmt.Sprintf("%s%s", dir, "/config/config.yml") - + //TODO: rezolva problema cu config file configuration, err := config.NewConfig(fullPathToConfig) if err != nil { panic(err) } - StartSandbox(tag, vmsName, targetVM, networksNo, configuration) + scenFilePtr := flag.String("scenarios", defaultScenarioFile, "scenario folder") + flag.Parse() -} + scenarios, err := store.LoadScenarios(*scenFilePtr) + if err != nil { + log.Error().Err(err).Str("file", *scenFilePtr).Msgf("failed to read scenarios from file") + return + } -func StartSandbox(tag, vmsName, targetVM string, NetworksNO int, config *config.Config) error { - //wgConfig := d.config.WireguardService + sandboxConf := sandbox.SandConfig{ + Name: "sandbox2", + Tag: "test", + Config: configuration, + } - env, err := sandbox.NewSandbox(sandbox.SandConfig{ - NetworksNO: NetworksNO, - VmName: vmsName, - Tag: tag, - }, config.VmConfig.OvaDir) + sand, err := sandbox.NewSandbox(&sandboxConf) if err != nil { - return err + log.Error().Msg("Problem in creating the newSandbox") } - if err := env.CreateSandbox(tag, targetVM, vmsName, NetworksNO); err != nil { - log.Info().Err(err).Msgf("Sandbox environment is starting") - return err - } + fmt.Println("Acum urmeaza problema") - return nil + if err := sand.StartSandbox(context.TODO(), sandboxConf.Tag, sandboxConf.Name, scenarios); err != nil { + log.Error().Msg("Problem in starting the sandbox2") + } -} \ No newline at end of file +} diff --git a/sandbox/environment.go b/sandbox/environment.go new file mode 100644 index 0000000..116c976 --- /dev/null +++ b/sandbox/environment.go @@ -0,0 +1,688 @@ +package sandbox + +import ( + "context" + "errors" + "fmt" + "github.com/aau-network-security/sandbox/config" + "github.com/aau-network-security/sandbox/controller" + "github.com/aau-network-security/sandbox/models" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "strconv" + "strings" + "sync" + + //"github.com/aau-network-security/sandbox2/models" + "github.com/aau-network-security/sandbox/store" + "github.com/aau-network-security/sandbox/virtual" + "github.com/aau-network-security/sandbox/virtual/docker" + "github.com/aau-network-security/sandbox/virtual/vbox" + "github.com/rs/zerolog/log" +) + +var ( + redListenPort uint = 5181 + blueListenPort uint = 5182 + min = 7900 + max = 7950 + gmin = 5350 + gmax = 5375 + smin = 3000 + smax = 3500 + rmin = 5000 + rmax = 5300 + + ErrVMNotCreated = errors.New("no VM created") + ErrGettingContainerID = errors.New("could not get container ID") +) + +type environment struct { + // challenge microservice should be integrated heres + controller controller.NetController 
+ //wg vpn.WireguardClient + //dhcp dhproto.DHCPClient + dockerHost docker.Host + instances []virtual.Instance + ports []string + vlib vbox.Library + //dnsServer *dns.Server +} + +type SandConfig struct { + //ID string + Name string + Tag string + //WgConfig wg.WireGuardConfig + //Host string + env *environment + Config *config.Config + + //NetworksIP map[string]string + //NetworksNO int + + //redVPNIp string + //blueVPNIp string + //redPort uint + //bluePort uint + //CreatedAt time.Time + //RedPanicLeft uint + //BluePanicLeft uint +} + +func NewSandbox(sandconf *SandConfig) (*SandConfig, error) { + + netController := controller.New() + netController.IPPool = controller.NewIPPoolFromHost() + + dockerHost := docker.NewHost() + vlib := vbox.NewLibrary(sandconf.Config.VmConfig.OvaDir) + env := &environment{ + controller: *netController, + dockerHost: dockerHost, + vlib: vlib, + } + + sandconf.env = env + + log.Info().Msgf("New environment initialized ") + return sandconf, nil +} + +func (gc *SandConfig) StartSandbox(ctx context.Context, tag, name string, scenarios map[int]store.Scenario) error { + + scenario, ok := scenarios[0] + if !ok { + return status.Errorf(codes.InvalidArgument, "No scenario exists with that ID - See valid ID using list command") + } + + log.Info().Str("Game Tag", tag). + Str("Game Name", name). + Str("Scenario", scenario.Name). + Msg("starting sandbox") + + log.Debug().Str("Game", name).Str("bridgeName", tag).Msg("creating openvswitch bridge") + if err := gc.env.initializeOVSBridge(tag); err != nil { + return err + } + + log.Debug().Str("Game", name).Int("Networks", len(scenario.Networks)).Msg("Creating networks") + if err := gc.env.createNetworks(tag, scenario.Networks); err != nil { + return err + } + + var vlanPorts []string + for _, network := range scenario.Networks { + vlanPorts = append(vlanPorts, fmt.Sprintf("%s_%s", tag, network.Name)) + } + vlanPorts = append(vlanPorts, fmt.Sprintf("%s_monitoring", tag)) + + log.Debug().Str("Game", name).Msg("configuring monitoring") + if err := gc.env.configureMonitor(ctx, tag, scenario.Networks); err != nil { + log.Error().Err(err).Msgf("configuring monitoring") + return err + } + + //log.Debug().Str("Game", tag).Msgf("Initilizing VPN VM") + + //assign connection port to RED users + //redTeamVPNPort := getRandomPort(min, max) + + // assign grpc port to wg vms + //wgPort := getRandomPort(gmin, gmax) + + //routerPort := getRandomPort(rmin, rmax) + + //assign connection port to Blue users + //blueTeamVPNPort := getRandomPort(min, max) + + //if err := gc.env.initWireguardVM(ctx, tag, vlanPorts, redTeamVPNPort, blueTeamVPNPort, wgPort, routerPort); err != nil { + // + // return err + //} + + //log.Debug().Str("Game", name).Msg("waiting for wireguard vm to boot") + // + //dhcpClient, err := dhcp.NewDHCPClient(ctx, gc.WgConfig, wgPort) + //if err != nil { + // log.Error().Err(err).Msg("connecting to DHCP service") + // return err + //} + // + //gc.env.dhcp = dhcpClient + + //log.Debug().Str("Game ", name).Msg("starting DHCP server") + + //gc.NetworksIP, err, ipMail, ipDC = gc.env.initDHCPServer(ctx, len(scenario.Networks), scenario) + //if err != nil { + // return err + //} + + //log.Debug().Str("Game ", name).Msg("starting DNS server") + // + //if err := gc.env.initDNSServer(ctx, tag, gc.NetworksIP, scenario, ipMail, ipDC); err != nil { + // log.Error().Err(err).Msg("connecting to DHCP service") + // return err + //} + + //wgClient, err := wg.NewGRPCVPNClient(ctx, gc.WgConfig, wgPort) + //if err != nil { + // 
log.Error().Err(err).Msg("connecting to wireguard service") + // return err + //} + //gc.env.wg = wgClient + + log.Debug().Str("Game", name).Msg("initializing scenario") + if err := gc.env.initializeScenario(ctx, tag, scenario); err != nil { + return err + } + + //ethInterfaceName := "eth0" // can be customized later + + //redTeamVPNIp, err := gc.env.getRandomIp() + //if err != nil { + // log.Error().Err(err).Msg("Problem in generating red team VPNip") + // return err + //} + //// + //gc.redVPNIp = fmt.Sprintf("%s.0/24", redTeamVPNIp) + ////Assigning a connection port for Red team + // + //gc.redPort = redTeamVPNPort + + // create wireguard interface for red team + //wgNICred := fmt.Sprintf("%s_red", tag) + + // initializing VPN endpoint for red team + //if err := gc.env.initVPNInterface(gc.redVPNIp, redListenPort, wgNICred, ethInterfaceName); err != nil { + // return err + //} + // + //blueTeamVPNIp, err := gc.env.getRandomIp() + //if err != nil { + // log.Error().Err(err).Msg("") + // return err + //} + // + //gc.blueVPNIp = fmt.Sprintf("%s.0/24", blueTeamVPNIp) + // + ////Assigning a connection port for blue team + // + //gc.bluePort = blueTeamVPNPort + // initializing VPN endpoint for blue team + + //create wireguard interface for blue team + //wgNICblue := fmt.Sprintf("%s_blue", tag) + + //if err := gc.env.initVPNInterface(gc.blueVPNIp, blueListenPort, wgNICblue, ethInterfaceName); err != nil { + // return err + //} + + macAddress := "04:d3:b0:9b:ea:d6" + macAddressClean := strings.ReplaceAll(macAddress, ":", "") + + log.Debug().Str("sandbox", tag).Msg("Initalizing SoC") + socPort := getRandomPort(smin, smax) + ifaces := []string{fmt.Sprintf("%s_monitoring", tag), fmt.Sprintf("%s_AllBlue", tag)} + + //Todo: add also internet interface + + //ifaces := + if err := gc.env.initializeSOC(ctx, ifaces, macAddressClean, tag, 2, socPort); err != nil { + log.Error().Err(err).Str("sandbox", tag).Msg("starting SoC vm") + return err + } + + log.Info().Str("Game Tag", tag). + Str("Game Name", name). 
+ Msg("started sandbox") + + return nil +} +func (gc *SandConfig) CloseSandbox(ctx context.Context) error { + var waitg sync.WaitGroup + var failed bool + + log.Info().Str("Game Name", gc.Name).Str("Game Tag", gc.Tag).Msg("Stopping sandbox") + for _, instance := range gc.env.instances { + waitg.Add(1) + go func(vi virtual.Instance) { + defer waitg.Done() + if err := vi.Stop(); err != nil { + log.Error().Str("Instance Type", vi.Info().Type).Str("Instance Name", vi.Info().Id).Msg("failed to stop virtual instance") + failed = true + } + log.Debug().Str("Instance Type", vi.Info().Type).Str("Instance Name", vi.Info().Id).Msg("stopped instance") + if err := vi.Close(); err != nil { + log.Error().Str("Instance Type", vi.Info().Type).Str("Instance Name", vi.Info().Id).Msg("failed to close virtual instance") + failed = true + } + + if vi.Info().Type == "docker" { + if err := gc.env.controller.Ovs.Docker.DeletePorts(gc.Tag, vi.Info().Id); err != nil { + log.Error().Str("Instance Name", vi.Info().Id).Msg("Deleted all ports on docker image") + failed = true + } + } + log.Debug().Str("Instance Type", vi.Info().Type).Str("Instance Name", vi.Info().Id).Msg("closed instance") + }(instance) + + } + waitg.Wait() + if failed { + return errors.New("failed to stop an virtual instance") + } + + if err := gc.env.removeNetworks(gc.Tag); err != nil { + return errors.New("failed to remove networks") + } + + return nil +} + +//func (env *environment) initVPNInterface(ipAddress string, port uint, vpnInterfaceName, ethInterface string) error { +// +// // ipAddress should be in this format : "45.11.23.1/24" +// // port should be unique per interface +// +// _, err := env.wg.InitializeI(context.Background(), &vpn.IReq{ +// Address: ipAddress, +// ListenPort: uint32(port), +// SaveConfig: true, +// Eth: ethInterface, +// IName: vpnInterfaceName, +// DownInterfacesFile: "/etc/network/downinterfaces", +// }) +// if err != nil { +// log.Error().Msgf("Error in initializing interface %v", err) +// return err +// } +// return nil +//} + +//func (env *environment) initDHCPServer(ctx context.Context, numberNetworks int, scenario store.Scenario) (map[string]string, error, string, string) { +// var networks []*dhproto.Network +// var staticHosts []*dhproto.StaticHost +// var ipMail, ipDC string +// +// ipList := make(map[string]string) +// +// for i := 1; i <= numberNetworks; i++ { +// var network dhproto.Network +// randIP, _ := env.controller.IPPool.Get() +// network.Network = randIP + ".0" +// network.Min = randIP + ".6" +// network.Max = randIP + ".250" +// network.Router = randIP + ".1" +// +// ipList[fmt.Sprintf("%d", 10*i)] = randIP + ".0/24" +// network.DnsServer = randIP + ".2" +// networks = append(networks, &network) +// +// } +// +// // Setup monitoring network +// +// monitoringNet := dhproto.Network{ +// Network: "10.10.10.0", +// Min: "10.10.10.6", +// Max: "10.10.10.199", +// Router: "10.10.10.1", +// DnsServer: "10.10.10.2", +// } +// ipList[""] = "10.10.10.0/24" +// +// networks = append(networks, &monitoringNet) +// //Todo: This is scenario based method to make it work +// // in future this needs to be scenario indenpent +// +// for _, item := range scenario.Hosts { +// +// //cast la string acum e lista de stringuri +// if item.Name == "mailserver" { +// +// ipMail = ConstructStaticIP(ipList, item.Networks, item.IPAddr) +// host := dhproto.StaticHost{ +// Name: item.Name, +// +// MacAddress: "04:d3:04:54:fe:15", +// Address: ipMail, +// Router: ConstructStaticIP(ipList, item.Networks, ".1"), +// DomainName: 
fmt.Sprintf("\"%s\"", item.DNS), +// DnsServer: ConstructStaticIP(ipList, item.Networks, ".2"), +// } +// +// staticHosts = append(staticHosts, &host) +// continue +// +// } else if item.Name == "DCcon" { +// fmt.Printf("Este in bucla cu DCcon \n") +// +// ipDC = ConstructStaticIP(ipList, item.Networks, item.IPAddr) +// fmt.Printf("DCcon IP: %s\n", ipDC) +// +// host := dhproto.StaticHost{ +// Name: item.Name, +// MacAddress: "04:d3:b0:c7:57:c7", +// Address: ipDC, +// Router: ConstructStaticIP(ipList, item.Networks, ".1"), +// DomainName: fmt.Sprintf("\"%s\"", item.DNS), +// DnsServer: ConstructStaticIP(ipList, item.Networks, ".2"), +// } +// staticHosts = append(staticHosts, &host) +// } else { +// fmt.Printf("Este in bucla cu Else. \n") +// continue +// } +// +// } +// +// host := dhproto.StaticHost{ +// Name: "SOC", +// MacAddress: "04:d3:b0:9b:ea:d6", +// Address: "10.10.10.200", +// Router: "10.10.10.1", +// DomainName: "\"blue.monitor.soc\"", +// DnsServer: "10.10.10.2", +// } +// +// staticHosts = append(staticHosts, &host) +// +// _, err := env.dhcp.StartDHCP(ctx, &dhproto.StartReq{Networks: networks, StaticHosts: staticHosts}) +// if err != nil { +// return ipList, err, ipMail, ipDC +// } +// +// return ipList, nil, ipMail, ipDC +//} + +//func (env *environment) initDNSServer(ctx context.Context, bridge string, ipList map[string]string, scenario store.Scenario, IPMail, IPdc string) error { +// +// server, err := dns.New(bridge, ipList, scenario, IPMail, IPdc) +// if err != nil { +// log.Error().Msgf("Error creating DNS server %v", err) +// return err +// } +// env.dnsServer = server +// //env.instances = append(env.instances, server ) +// +// if err := server.Run(ctx); err != nil { +// log.Error().Msgf("Error in starting DNS %v", err) +// return err +// } +// +// contID := server.Container().ID() +// fmt.Printf("AICI e ID = %s\n", contID) +// +// i := 1 +// for _, network := range ipList { +// +// if network == "10.10.10.0/24" { +// +// ipAddrs := strings.TrimSuffix(network, ".0/24") +// ipAddrs = ipAddrs + ".2/24" +// +// fmt.Println(ipAddrs) +// +// if err := env.controller.Ovs.Docker.AddPort(bridge, fmt.Sprintf("eth%d", i), contID, ovs.DockerOptions{IPAddress: ipAddrs}); err != nil { +// +// log.Error().Err(err).Str("container", contID).Msg("adding port to DNS container") +// return err +// } +// i++ +// fmt.Println(i) +// +// } else { +// ipAddrs := strings.TrimSuffix(network, ".0/24") +// ipAddrs = ipAddrs + ".2/24" +// +// fmt.Println(ipAddrs) +// //fmt.Sprintf("eth%d", vlan) +// tag := i * 10 +// +// sTag := strconv.Itoa(tag) +// +// fmt.Println(sTag) +// if err := env.controller.Ovs.Docker.AddPort(bridge, fmt.Sprintf("eth%d", i), contID, ovs.DockerOptions{VlanTag: sTag, IPAddress: ipAddrs}); err != nil { +// +// log.Error().Err(err).Str("container", contID).Msg("adding port to DNS container") +// return err +// } +// i++ +// fmt.Println(i) +// +// } +// +// } +// +// return nil +//} + +//configureMonitor will configure the monitoring VM by attaching the correct interfaces +func (env *environment) configureMonitor(ctx context.Context, bridge string, nets []models.Network) error { + + log.Info().Str("sandbox tag", bridge).Msg("creating monitoring network") + if err := env.createPort(bridge, "monitoring", 0); err != nil { + return err + } + + mirror := fmt.Sprintf("%s_mirror", bridge) + + log.Info().Str("sandbox tag", bridge).Msg("Creating the network mirror") + if err := env.controller.Ovs.VSwitch.CreateMirrorforBridge(mirror, bridge); err != nil { + 
log.Error().Err(err).Msg("creating mirror") + return err + } + + if err := env.createPort(bridge, "AllBlue", 0); err != nil { + return err + } + + portUUID, err := env.controller.Ovs.VSwitch.GetPortUUID(fmt.Sprintf("%s_AllBlue", bridge)) + if err != nil { + log.Error().Err(err).Str("port", fmt.Sprintf("%s_AllBlue", bridge)).Msg("getting port uuid") + return err + } + + var vlans []string + for _, network := range nets { + vlans = append(vlans, fmt.Sprint(network.Tag)) + } + + if err := env.controller.Ovs.VSwitch.MirrorAllVlans(mirror, portUUID, vlans); err != nil { + log.Error().Err(err).Msgf("mirroring traffic") + return err + } + + return nil +} + +func (env *environment) initializeSOC(ctx context.Context, networks []string, mac string, tag string, nic int, socPort uint) error { + + vm, err := env.vlib.GetCopy(ctx, tag, + vbox.InstanceConfig{Image: "soc2022.ova", + CPU: 4, + MemoryMB: 32384}, + vbox.MapVMPort([]virtual.NatPortSettings{ + { + HostPort: strconv.FormatUint(uint64(socPort), 10), + GuestPort: "22", + ServiceName: "sshd", + Protocol: "tcp", + }, + }), + // SetBridge parameter cleanFirst should be enabled when wireguard/router instance + // is attaching to openvswitch network + vbox.SetBridge(networks, false), + vbox.SetMAC(mac, nic), + ) + + if err != nil { + log.Error().Err(err).Msg("creating copy of SoC VM") + return err + } + if vm == nil { + return ErrVMNotCreated + } + log.Debug().Str("VM", vm.Info().Id).Msg("starting VM") + + if err := vm.Start(ctx); err != nil { + log.Error().Err(err).Msgf("starting virtual machine") + return err + } + env.instances = append(env.instances, vm) + + return nil +} + +//func (env *environment) initWireguardVM(ctx context.Context, tag string, vlanPorts []string, redTeamVPNport, blueTeamVPNport, wgPort uint, routerPort uint) error { +// +// vm, err := env.vlib.GetCopy(ctx, +// tag, +// vbox.InstanceConfig{Image: "Routerfix.ova", +// CPU: 2, +// MemoryMB: 2048}, +// vbox.MapVMPort([]virtual.NatPortSettings{ +// { +// // this is for gRPC service +// HostPort: strconv.FormatUint(uint64(wgPort), 10), +// GuestPort: "5353", +// ServiceName: "wgservice", +// Protocol: "tcp", +// }, +// { +// HostPort: strconv.FormatUint(uint64(redTeamVPNport), 10), +// GuestPort: strconv.FormatUint(uint64(redListenPort), 10), +// ServiceName: "wgRedConnection", +// Protocol: "udp", +// }, +// { +// HostPort: strconv.FormatUint(uint64(blueTeamVPNport), 10), +// GuestPort: strconv.FormatUint(uint64(blueListenPort), 10), +// ServiceName: "wgBlueConnection", +// Protocol: "udp", +// }, +// { +// HostPort: strconv.FormatUint(uint64(routerPort), 10), +// GuestPort: "22", +// ServiceName: "sshd", +// Protocol: "tcp", +// }, +// }), +// // SetBridge parameter cleanFirst should be enabled when wireguard/router instance +// // is attaching to openvswitch network +// vbox.SetBridge(vlanPorts, false), +// ) +// +// if err != nil { +// log.Error().Err(err).Msg("creating VPN VM") +// return err +// } +// if vm == nil { +// return ErrVMNotCreated +// } +// log.Debug().Str("VM", vm.Info().Id).Msg("starting VM") +// +// if err := vm.Start(ctx); err != nil { +// log.Error().Err(err).Msgf("starting virtual machine") +// return err +// } +// env.instances = append(env.instances, vm) +// +// return nil +//} + +//func (gc *SandConfig) CreateVPNConfig(ctx context.Context, isRed bool, idUser string) (VPNConfig, error) { +// +// var nicName string +// +// var allowedIps []string +// var peerIP string +// var endpoint string +// //var dns string +// if isRed { +// //dns = "" +// nicName = 
fmt.Sprintf("%s_red", gc.Tag) +// +// for key := range gc.NetworksIP { +// if gc.NetworksIP[key] == "10.10.10.0/24" { +// continue +// } +// allowedIps = append(allowedIps, gc.NetworksIP[key]) +// break +// } +// +// peerIP = gc.redVPNIp +// allowedIps = append(allowedIps, peerIP) +// +// endpoint = fmt.Sprintf("%s.%s:%d", gc.Tag, gc.Host, gc.redPort) +// } else { +// +// nicName = fmt.Sprintf("%s_blue", gc.Tag) +// for key := range gc.NetworksIP { +// allowedIps = append(allowedIps, gc.NetworksIP[key]) +// } +// +// peerIP = gc.blueVPNIp +// allowedIps = append(allowedIps, peerIP) +// endpoint = fmt.Sprintf("%s.%s:%d", gc.Tag, gc.Host, gc.bluePort) +// } +// +// serverPubKey, err := gc.env.wg.GetPublicKey(ctx, &vpn.PubKeyReq{PubKeyName: nicName, PrivKeyName: nicName}) +// if err != nil { +// log.Error().Err(err).Str("User", idUser).Msg("Err get public nicName wireguard") +// return VPNConfig{}, err +// } +// +// _, err = gc.env.wg.GenPrivateKey(ctx, &vpn.PrivKeyReq{PrivateKeyName: gc.Tag + "_" + idUser + "_"}) +// if err != nil { +// //fmt.Printf("Err gen private nicName wireguard %v", err) +// log.Error().Err(err).Str("User", idUser).Msg("Err gen private nicName wireguard") +// return VPNConfig{}, err +// } +// +// //generate client public nicName +// //log.Info().Msgf("Generating public nicName for team %s", evTag+"_"+team+"_"+strconv.Itoa(ipAddr)) +// _, err = gc.env.wg.GenPublicKey(ctx, &vpn.PubKeyReq{PubKeyName: gc.Tag + "_" + idUser + "_", PrivKeyName: gc.Tag + "_" + idUser + "_"}) +// if err != nil { +// log.Error().Err(err).Str("User", idUser).Msg("Err gen public nicName client") +// return VPNConfig{}, err +// } +// +// clientPubKey, err := gc.env.wg.GetPublicKey(ctx, &vpn.PubKeyReq{PubKeyName: gc.Tag + "_" + idUser + "_"}) +// if err != nil { +// fmt.Printf("Error on GetPublicKey %v", err) +// return VPNConfig{}, err +// } +// +// pIP := fmt.Sprintf("%d/32", IPcounter()) +// +// peerIP = strings.Replace(peerIP, "0/24", pIP, 1) +// +// _, err = gc.env.wg.AddPeer(ctx, &vpn.AddPReq{ +// Nic: nicName, +// AllowedIPs: peerIP, +// PublicKey: clientPubKey.Message, +// }) +// +// if err != nil { +// log.Error().Err(err).Msg("Error on adding peer to interface") +// return VPNConfig{}, err +// +// } +// +// clientPrivKey, err := gc.env.wg.GetPrivateKey(ctx, &vpn.PrivKeyReq{PrivateKeyName: gc.Tag + "_" + idUser + "_"}) +// if err != nil { +// log.Error().Err(err).Msg("getting priv NIC") +// return VPNConfig{}, err +// } +// +// return VPNConfig{ +// ServerPublicKey: serverPubKey.Message, +// PrivateKeyClient: clientPrivKey.Message, +// Endpoint: endpoint, +// AllowedIPs: strings.Join(allowedIps, ", "), +// PeerIP: peerIP, +// }, nil +// +//} diff --git a/game/helpers.go b/sandbox/helpers.go similarity index 98% rename from game/helpers.go rename to sandbox/helpers.go index 5743026..6b13081 100644 --- a/game/helpers.go +++ b/sandbox/helpers.go @@ -1,4 +1,4 @@ -package game +package sandbox import ( "fmt" diff --git a/game/network.go b/sandbox/network.go similarity index 99% rename from game/network.go rename to sandbox/network.go index 6fa02b6..c1d9a73 100644 --- a/game/network.go +++ b/sandbox/network.go @@ -1,4 +1,4 @@ -package game +package sandbox import ( "fmt" diff --git a/game/scenario.go b/sandbox/scenario.go similarity index 86% rename from game/scenario.go rename to sandbox/scenario.go index 6b8c035..fce411c 100644 --- a/game/scenario.go +++ b/sandbox/scenario.go @@ -1,4 +1,4 @@ -package game +package sandbox import ( "context" @@ -47,8 +47,8 @@ func (env *environment) 
attachDocker(ctx context.Context, wg *sync.WaitGroup, br container := docker.NewContainer(docker.ContainerConfig{ Image: image, Labels: map[string]string{ - "nap-game": bridge, - "game-networks": strings.Join(nets, ","), + "nap-sandbox": bridge, + "sandbox-networks": strings.Join(nets, ","), }}) if err := container.Create(ctx); err != nil { @@ -171,28 +171,6 @@ func (env *environment) attachVM(ctx context.Context, wg *sync.WaitGroup, name, } } - //if name == "DCcon" { - // vm, err := env.vlib.GetCopy(ctx, - // bridge, - // vbox.InstanceConfig{Image: image, - // CPU: 1, - // MemoryMB: 2048}, - // vbox.SetBridge(ifaceNames, true), - // vbox.SetMAC("04d3b0c757c7", 1), - // - // ) - // log.Error().Err(err).Msg("VM not created ") - // return err - // - // if vm == nil { - // return ErrVMNotCreated - // } - // env.instances = append(env.instances, vm) - // if err := vm.Start(ctx); err != nil { - // log.Error().Err(err).Msg("starting mailserver virtual machine") - // return err - // } - //} return nil } diff --git a/scenarios/scenario.yml b/scenarios/scenario.yml index b3a09ca..90b4083 100644 --- a/scenarios/scenario.yml +++ b/scenarios/scenario.yml @@ -1,14 +1,15 @@ -name: "Test scenario" -topic: finance -FQDN: myfinance.cu -story-red: "Financially motivated adversary groups executing ransomware attacks have rightfully gotten our attention in recent years. -Similar to Lulzec, there's a new group catching attention with different motivations, targeting larger organizations. -Groups of this nature focus on data theft and extortion via means of social engineering - commonly, targeted spear phishing campaigns" -story-blue: "The LAPSUS$ group emerged onto the scene a number of months ago, targeting high-profile organizations such as Nvidia, Samsung, and Ubisoft -- making various demands that in some cases, resulted in either data dumps or screenshots of internal systems shared via the group's Telegram account. -These were sometimes determined by user-voted polls within the group, -suggesting that this is only the beginning of a series of attacks the group is undertaking more frequently as they gain press coverage." -duration: 3 +#name: "Test scenario" +#topic: finance +#FQDN: myfinance.cu +#story-red: "Financially motivated adversary groups executing ransomware attacks have rightfully gotten our attention in recent years. +#Similar to Lulzec, there's a new group catching attention with different motivations, targeting larger organizations. +#Groups of this nature focus on data theft and extortion via means of social engineering - commonly, targeted spear phishing campaigns" +#story-blue: "The LAPSUS$ group emerged onto the scene a number of months ago, targeting high-profile organizations such as Nvidia, Samsung, and Ubisoft +#- making various demands that in some cases, resulted in either data dumps or screenshots of internal systems shared via the group's Telegram account. +#These were sometimes determined by user-voted polls within the group, +#suggesting that this is only the beginning of a series of attacks the group is undertaking more frequently as they gain press coverage." +#duration: 3 + networks: - name: vlan10 tag: 10 @@ -16,8 +17,7 @@ networks: tag: 20 - name: vlan30 tag: 30 -hosts:ls - +hosts: - name: webserver type: docker networks: [ "20" ] diff --git a/scripts/clean.sh b/scripts/clean.sh index a2d73ec..8306108 100644 --- a/scripts/clean.sh +++ b/scripts/clean.sh @@ -1,7 +1,7 @@ #!/bin/bash -# todo: will be modified with more dynamic way of cleaning stuff + #request sudo.... 
#if [[ $UID != 0 ]]; then @@ -10,28 +10,31 @@ # exit 1 #fi -sudo ovs-vsctl del-br test -sudo ip tuntap del tap0 mode tap -sudo ip tuntap del tap10 mode tap -sudo ip tuntap del tap20 mode tap -sudo ip tuntap del tap30 mode tap -sudo ip tuntap del tap40 mode tap -sudo ip tuntap del tap50 mode tap -sudo ip tuntap del tap60 mode tap -sudo ip tuntap del vlan110 mode tap -sudo ip tuntap del vlan220 mode tap -sudo ip tuntap del vlan330 mode tap -sudo ip tuntap del vlan440 mode tap -sudo ip tuntap del vlan550 mode tap -sudo ip tuntap del mon10 mode tap -sudo ip tuntap del ALLblue mode tap +while getopts "b:" arg; do + case $arg in + b) bridge=$OPTARG;; + esac +done + + +sudo ovs-vsctl del-br $bridge +sudo ip link del ${bridge}_vlan10 +sudo ip link del ${bridge}_vlan20 +sudo ip link del ${bridge}_vlan30 +sudo ip link del ${bridge}_AllBlue +sudo ip link del ${bridge}_monitoring + + +VBoxManage list runningvms | awk '/'$bridge'/ {print $1}' | xargs -I vmid VBoxManage controlvm vmid poweroff +VBoxManage list vms | awk '/'$bridge'/ {print $1}' | xargs -I vmid VBoxManage unregistervm --delete vmid +VBoxManage list runningvms | awk '/'$bridge'/ {print $2}' | xargs -I vmid VBoxManage controlvm vmid poweroff +VBoxManage list vms | awk '/'$bridge'/ {print $2}' | xargs -I vmid VBoxManage unregistervm --delete vmid -VBoxManage list runningvms | awk '/sandbox/ {print $1}' | xargs -I vmid VBoxManage controlvm vmid poweroff -VBoxManage list vms | awk '/sandbox/ {print $2}' | xargs -I vmid VBoxManage unregistervm --delete vmid -rm -rf ~/VirtualBox\ VMs/sandbox* +rm -rf ~/VirtualBox\ VMs/nap-$bridge-* +rm -rf ~/VirtualBox\ VMs/$bridge-* #while read -r line; do # vm=$(echo $line | cut -d ' ' -f 2) @@ -45,18 +48,12 @@ rm -rf ~/VirtualBox\ VMs/sandbox* # Remove all docker containers that have a UUID as name #docker ps -a --format '{{.Names}}' | grep -E '[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}' | xargs docker rm -f -docker kill $(docker ps -q -a -f "label=sandbox") - -docker rm $(docker ps -q -a -f "label=sandbox") - - - +docker kill $(docker ps -q -a -f "label=nap-sandbox") -# Remove all macvlan networks -docker network rm $(docker network ls -q -f "label=sandbox") +docker rm $(docker ps -q -a -f "label=nap-sandbox") # Prune entire docker -docker system prune --filter "label=sandbox" +docker system prune --filter "label=nap-sandbox" # Prune volumes -docker volume prune --filter "label=sandbox" \ No newline at end of file +docker volume prune --filter "label=nap-sandbox" \ No newline at end of file diff --git a/store/scenarios.go b/store/scenarios.go index 545ba9e..599b0d0 100644 --- a/store/scenarios.go +++ b/store/scenarios.go @@ -7,7 +7,7 @@ import ( "github.com/aau-network-security/sandbox/models" "github.com/rs/zerolog/log" - yaml "gopkg.in/yaml.v2" + "gopkg.in/yaml.v2" ) var ( @@ -15,15 +15,15 @@ var ( ) type Scenario struct { - Name string `yaml:"name"` - Topic string `yaml:"topic"` - FQDN string `yaml:"FQDN"` - StoryRed string `yaml:"story-red"` - StoryBlue string `yaml:"story-blue"` - Duration uint32 `yaml:"duration"` - Difficulty string `yaml:"difficulty"` - Networks []models.Network `yaml:"networks"` - Hosts []models.Host `yaml:"hosts"` + Name string `yaml:"name"` + //Topic string `yaml:"topic"` + //FQDN string `yaml:"FQDN"` + //StoryRed string `yaml:"story-red"` + //StoryBlue string `yaml:"story-blue"` + //Duration uint32 `yaml:"duration"` + //Difficulty string `yaml:"difficulty"` + Networks []models.Network `yaml:"networks"` + Hosts []models.Host `yaml:"hosts"` } // LoadScenarios will 
load all files in a directory into a map of Scenario diff --git a/virtual/docker/docker.go b/virtual/docker/docker.go index 078496a..1ca9734 100644 --- a/virtual/docker/docker.go +++ b/virtual/docker/docker.go @@ -55,7 +55,7 @@ func init() { log.Fatal().Err(err).Msg("") } - DefaultLinkBridge, err = newDefaultBridge("defatt-bridge") + DefaultLinkBridge, err = newDefaultBridge("sandbox-bridge") if err != nil { log.Fatal().Err(err).Msg("Error creating default bridge") }
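
Note (illustration only, not part of the patch): the trimmed store.Scenario now carries just the name, networks and hosts read from scenarios/scenario.yml, and main.go consumes it via store.LoadScenarios(*scenFilePtr). A minimal sketch of how such a file unmarshals with gopkg.in/yaml.v2 is shown below; the network and host structs here are hypothetical stand-ins inferred from scenario.yml, not the real models.Network / models.Host types.

package main

import (
	"fmt"
	"io/ioutil"

	"gopkg.in/yaml.v2"
)

// Hypothetical stand-ins for models.Network and models.Host; yaml tags are
// inferred from scenarios/scenario.yml and may not match the real models package.
type network struct {
	Name string `yaml:"name"`
	Tag  int    `yaml:"tag"`
}

type host struct {
	Name     string   `yaml:"name"`
	Type     string   `yaml:"type"`
	Networks []string `yaml:"networks"`
}

// Mirrors the trimmed store.Scenario: only name, networks and hosts remain.
type scenario struct {
	Name     string    `yaml:"name"`
	Networks []network `yaml:"networks"`
	Hosts    []host    `yaml:"hosts"`
}

func main() {
	raw, err := ioutil.ReadFile("scenarios/scenario.yml")
	if err != nil {
		panic(err)
	}
	var s scenario
	if err := yaml.Unmarshal(raw, &s); err != nil {
		panic(err)
	}
	fmt.Printf("scenario %q: %d networks, %d hosts\n", s.Name, len(s.Networks), len(s.Hosts))
}

The rewritten scripts/clean.sh now takes the bridge/sandbox tag explicitly via getopts rather than hard-coding interface names, e.g.: ./scripts/clean.sh -b test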