I have a microservices application built with Node.js and MySQL, and for a few days I have been trying to run it on Kubernetes with scalability. I built each service into an image that runs as a pod, and I have a gateway that routes TCP traffic to each service. Port-forwarding each pod individually works fine:

kubectl port-forward my-gateway --address 192.168.18.x 3000
kubectl port-forward my-adm-contact --address 192.168.18.x 8181

But since I can't port-forward to multiple pods in a single command, I decided to use the NGINX Ingress Controller. I installed it with Helm, but it only returns 404 Not Found on the page. My setup is below.
My gateway:
import { Module } from '@nestjs/common';
import { AppController } from './app.controller';
import { ClientsModule, Transport } from '@nestjs/microservices';
import { AppService } from './app.service';

@Module({
  imports: [
    ClientsModule.register([
      {
        name: 'SERVICE_A',
        transport: Transport.TCP,
        options: {
          host: "192.168.18.x",
          port: 8888,
        },
      },
      {
        name: 'SERVICE_B',
        transport: Transport.TCP,
        options: {
          host: "192.168.18.x",
          port: 8889,
        },
      },
      {
        name: 'USER',
        transport: Transport.TCP,
        options: {
          host: "192.168.18.x",
          port: 8887,
        },
      },
      {
        name: 'USER_LOGIN',
        transport: Transport.TCP,
        options: {
          host: "192.168.18.x",
          port: 8886,
        },
      },
      {
        name: 'USER_CREATE',
        transport: Transport.TCP,
        options: {
          host: "192.168.18.x",
          port: 8885,
        },
      },
      {
        name: 'USER_UPDATE',
        transport: Transport.TCP,
        options: {
          host: "192.168.18.x",
          port: 8884,
        },
      },
      {
        name: 'CATEGORY',
        transport: Transport.TCP,
        options: {
          host: "192.168.18.x",
          port: 8883,
        },
      },
      {
        name: 'CATEGORY_BUSCA',
        transport: Transport.TCP,
        options: {
          host: "192.168.18.x",
          port: 8882,
        },
      },
      {
        name: 'CATEGORY_PRODUCT',
        transport: Transport.TCP,
        options: {
          host: "192.168.18.x",
          port: 8881,
        },
      },
      {
        name: 'USER_SENHA',
        transport: Transport.TCP,
        options: {
          host: "192.168.18.x",
          port: 8880,
        },
      },
      {
        name: 'ADM_CONTACT',
        transport: Transport.TCP,
        options: {
          host: "192.168.18.x",
          port: 8181,
        },
      },
      {
        name: 'LOCATION',
        transport: Transport.TCP,
        options: {
          host: "192.168.18.x",
          port: 8878,
        },
      },
      {
        name: 'PRODUCT_STAR',
        transport: Transport.TCP,
        options: {
          host: "192.168.18.x",
          port: 8877,
        },
      },
      {
        name: 'PRODUCT_SINGLE',
        transport: Transport.TCP,
        options: {
          host: "192.168.18.x",
          port: 8876,
        },
      },
      {
        name: 'PRODUCT_GET_STAR',
        transport: Transport.TCP,
        options: {
          host: "192.168.18.x",
          port: 8875,
        },
      },
      {
        name: 'PURCHASE_CREATE',
        transport: Transport.TCP,
        options: {
          host: "192.168.18.x",
          port: 8874,
        },
      },
      {
        name: 'PURCHASE_GET_CART',
        transport: Transport.TCP,
        options: {
          host: "192.168.18.x",
          port: 8873,
        },
      },
      {
        name: 'PURCHASE_GET',
        transport: Transport.TCP,
        options: {
          host: "192.168.18.x",
          port: 8870,
        },
      }
    ]),
  ],
  controllers: [AppController],
  providers: [AppService],
})
export class AppModule {}
My contact service:
import { NestFactory } from '@nestjs/core';
import { Transport } from '@nestjs/microservices';
import { AppModule } from './app.module';
import { Logger } from '@nestjs/common';

const logger = new Logger();

async function bootstrap() {
  const app = await NestFactory.createMicroservice(AppModule, {
    transport: Transport.TCP,
    options: {
      host: "0.0.0.0",
      port: 8181,
    },
  });
  app.listen(() => logger.log('Microservice ADM CONTACT is listening'));
}
bootstrap();
Kubernetes Service for the contact service:
apiVersion: v1
kind: Service
metadata:
  name: my-adm-contact-service
  namespace: default
spec:
  ports:
  - port: 8181
    protocol: TCP
    targetPort: 8181
Kubernetes Pod for the contact service:
apiVersion: v1
kind: Pod
metadata:
  name: my-adm-contact
  labels:
    app: my-adm-contact
    name: my-adm-contact
spec:
  containers:
  - image: my-adm-contact
    imagePullPolicy: Never
    name: my-adm-contact
    ports:
    - containerPort: 8181
      protocol: TCP
  restartPolicy: Always
Kubernetes Endpoints for the contact service:
apiVersion: v1
kind: Endpoints
metadata:
  name: my-adm-contact-service
subsets:
- addresses:
  - ip: 192.168.18.x
  ports:
  - port: 8181
Kubernetes Service for the gateway:
apiVersion: v1
kind: Service
metadata:
  name: my-gateway-service
  namespace: default
spec:
  ports:
  - port: 3000
    protocol: TCP
    targetPort: 3000
Kubernetes Pod for the gateway:
apiVersion: v1
kind: Pod
metadata:
  name: my-gateway
  labels:
    app: my-gateway
    name: my-gateway
spec:
  containers:
  - image: api-gateway
    imagePullPolicy: Never
    name: my-gateway
    ports:
    - containerPort: 3000
      protocol: TCP
  restartPolicy: Always
Kubernetes Endpoints for the gateway:
apiVersion: v1
kind: Endpoints
metadata:
  # the name here should match the name of the Service
  name: my-gateway-service
subsets:
- addresses:
  - ip: 192.168.18.x
  ports:
  - port: 3000
Kubernetes Ingress:
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: web-ingress
  namespace: default
spec:
  ingressClassName: nginx
  # defaultBackend:
  #   service:
  #     name: my-gateway-service
  #     port:
  #       number: 3000
  rules:
  - host: host.docker.internal
  - http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: my-gateway-service
            port:
              number: 3000
      - path: /adm-contact
        pathType: Prefix
        backend:
          service:
            name: my-adm-contact-service
            port:
              number: 8181
My ingress-nginx TCP services ConfigMap:
apiVersion: v1
data:
  "3000": default/my-gateway-service:3000
  "8181": default/my-adm-contact-service:8181
kind: ConfigMap
metadata:
  annotations:
    meta.helm.sh/release-name: ingress-nginx
    meta.helm.sh/release-namespace: ingress-nginx
  creationTimestamp: "2022-06-28T00:00:28Z"
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.2.1
    helm.sh/chart: ingress-nginx-4.1.4
  name: ingress-nginx-tcp
  namespace: ingress-nginx
  resourceVersion: "264565"
  uid: 2284f6eb-53f9-4d8c-9f62-6ad303120f63
Does anyone know how to help me?
CodePudding user response:
Is there a specific reason you've set imagePullPolicy to Never in your pod specifications? Perhaps try removing it and leaving it at the default.
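For the contact pod, the spec would then look something like this (a sketch of the change just described; note that once the field is omitted, an untagged image such as my-adm-contact resolves to the default policy Always, so the image has to be pullable from a registry):

spec:
  containers:
  - image: my-adm-contact      # untagged, so the default pull policy resolves to Always
    # imagePullPolicy: Never   # removed; the default applies when the field is omitted
    name: my-adm-contact
    ports:
    - containerPort: 8181
      protocol: TCP
  restartPolicy: Always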
Also, did you check that your services are running fine inside the cluster? You can temporarily bypass the ingress and access the services directly from outside the cluster by setting the service type to NodePort (see the Kubernetes docs on NodePort Services) and hitting <Node-IP>:<nodePort>, just to verify that the problem is not in the service backend itself. It appears to me that that is the issue here.
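For example, a minimal NodePort sketch for the gateway Service (the nodePort value 30080 is an arbitrary pick from the default 30000-32767 range, and the selector assumes you let Kubernetes manage the Endpoints from the pod label app: my-gateway instead of maintaining them by hand):

apiVersion: v1
kind: Service
metadata:
  name: my-gateway-service
  namespace: default
spec:
  type: NodePort
  selector:
    app: my-gateway        # Kubernetes fills the Endpoints from matching pods
  ports:
  - port: 3000
    targetPort: 3000       # the containerPort of the gateway pod
    nodePort: 30080        # any free port in the default 30000-32767 range
    protocol: TCP

If curl http://<Node-IP>:30080/ answers, the gateway backend is healthy and the 404 is coming from the ingress layer; if it doesn't, the problem is in the service or pod itself.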