ttzzs committed
Commit ccf0441 · verified · 1 Parent(s): c40c447

feat: Add Copy to Clipboard with Interactive Preview to Excel Add-in

.pytest_cache/.gitignore ADDED
@@ -0,0 +1,2 @@
+ # Created by pytest automatically.
+ *
.pytest_cache/CACHEDIR.TAG ADDED
@@ -0,0 +1,4 @@
+ Signature: 8a477f597d28d172789f06886806bc55
+ # This file is a cache directory tag created by pytest.
+ # For information about cache directory tags, see:
+ # https://bford.info/cachedir/spec.html
.pytest_cache/v/cache/lastfailed ADDED
@@ -0,0 +1,3 @@
+ {
+ "tests/unit/test_dtos.py": true
+ }
.pytest_cache/v/cache/nodeids ADDED
@@ -0,0 +1,68 @@
+ [
+ "tests/unit/test_domain_models.py::TestAnomalyPoint::test_create_anomaly",
+ "tests/unit/test_domain_models.py::TestAnomalyPoint::test_deviation_percentage",
+ "tests/unit/test_domain_models.py::TestAnomalyPoint::test_is_above_expected",
+ "tests/unit/test_domain_models.py::TestAnomalyPoint::test_severity_high",
+ "tests/unit/test_domain_models.py::TestAnomalyPoint::test_severity_low",
+ "tests/unit/test_domain_models.py::TestAnomalyPoint::test_severity_medium",
+ "tests/unit/test_domain_models.py::TestAnomalyPoint::test_to_dict",
+ "tests/unit/test_domain_models.py::TestForecastConfig::test_create_valid_config",
+ "tests/unit/test_domain_models.py::TestForecastConfig::test_default_config",
+ "tests/unit/test_domain_models.py::TestForecastConfig::test_empty_quantiles",
+ "tests/unit/test_domain_models.py::TestForecastConfig::test_invalid_prediction_length",
+ "tests/unit/test_domain_models.py::TestForecastConfig::test_median_auto_added",
+ "tests/unit/test_domain_models.py::TestForecastConfig::test_quantile_out_of_range",
+ "tests/unit/test_domain_models.py::TestForecastConfig::test_quantiles_sorted",
+ "tests/unit/test_domain_models.py::TestForecastResult::test_create_valid_result",
+ "tests/unit/test_domain_models.py::TestForecastResult::test_empty_result_raises_error",
+ "tests/unit/test_domain_models.py::TestForecastResult::test_get_interval",
+ "tests/unit/test_domain_models.py::TestForecastResult::test_get_quantile",
+ "tests/unit/test_domain_models.py::TestForecastResult::test_get_quantile_not_found",
+ "tests/unit/test_domain_models.py::TestForecastResult::test_median_length_mismatch",
+ "tests/unit/test_domain_models.py::TestForecastResult::test_quantile_length_mismatch",
+ "tests/unit/test_domain_models.py::TestTimeSeries::test_create_valid_series",
+ "tests/unit/test_domain_models.py::TestTimeSeries::test_empty_series_raises_error",
+ "tests/unit/test_domain_models.py::TestTimeSeries::test_get_subset",
+ "tests/unit/test_domain_models.py::TestTimeSeries::test_non_numeric_values_raise_error",
+ "tests/unit/test_domain_models.py::TestTimeSeries::test_null_values_raise_error",
+ "tests/unit/test_domain_models.py::TestTimeSeries::test_timestamps_length_mismatch",
+ "tests/unit/test_domain_models.py::TestTimeSeries::test_to_dict",
+ "tests/unit/test_interfaces.py::TestIDataTransformer::test_cannot_instantiate",
+ "tests/unit/test_interfaces.py::TestIDataTransformer::test_is_abstract",
+ "tests/unit/test_interfaces.py::TestIForecastModel::test_cannot_instantiate",
+ "tests/unit/test_interfaces.py::TestIForecastModel::test_is_abstract",
+ "tests/unit/test_interfaces.py::TestIForecastModel::test_validate_context_empty_dataframe",
+ "tests/unit/test_interfaces.py::TestIForecastModel::test_validate_context_missing_columns",
+ "tests/unit/test_interfaces.py::TestIForecastModel::test_validate_context_null_values",
+ "tests/unit/test_interfaces.py::TestIForecastModel::test_validate_context_success",
+ "tests/unit/test_logger.py::TestLogger::test_logger_custom_level",
+ "tests/unit/test_logger.py::TestLogger::test_logger_different_levels",
+ "tests/unit/test_logger.py::TestLogger::test_logger_has_handler",
+ "tests/unit/test_logger.py::TestLogger::test_logger_level_from_settings",
+ "tests/unit/test_logger.py::TestLogger::test_logger_no_duplicate_handlers",
+ "tests/unit/test_logger.py::TestLogger::test_logger_output",
+ "tests/unit/test_logger.py::TestLogger::test_setup_logger_basic",
+ "tests/unit/test_services.py::TestAnomalyService::test_detect_anomalies_length_mismatch",
+ "tests/unit/test_services.py::TestAnomalyService::test_detect_anomalies_success",
+ "tests/unit/test_services.py::TestAnomalyService::test_get_anomaly_summary",
+ "tests/unit/test_services.py::TestAnomalyService::test_init_service",
+ "tests/unit/test_services.py::TestBacktestMetrics::test_create_metrics",
+ "tests/unit/test_services.py::TestBacktestMetrics::test_to_dict",
+ "tests/unit/test_services.py::TestBacktestService::test_calculate_metrics",
+ "tests/unit/test_services.py::TestBacktestService::test_init_service",
+ "tests/unit/test_services.py::TestBacktestService::test_simple_backtest_invalid_test_length",
+ "tests/unit/test_services.py::TestBacktestService::test_simple_backtest_success",
+ "tests/unit/test_services.py::TestForecastService::test_forecast_multi_series",
+ "tests/unit/test_services.py::TestForecastService::test_forecast_multi_series_empty_list",
+ "tests/unit/test_services.py::TestForecastService::test_forecast_univariate_invalid_series",
+ "tests/unit/test_services.py::TestForecastService::test_forecast_univariate_success",
+ "tests/unit/test_services.py::TestForecastService::test_init_service",
+ "tests/unit/test_settings.py::TestSettings::test_cors_origins_is_list",
+ "tests/unit/test_settings.py::TestSettings::test_default_values",
+ "tests/unit/test_settings.py::TestSettings::test_get_settings_singleton",
+ "tests/unit/test_settings.py::TestSettings::test_settings_from_env",
+ "tests/unit/test_settings.py::TestSettings::test_settings_module_instance",
+ "tests/unit/test_settings.py::TestSettingsValidation::test_api_version_format",
+ "tests/unit/test_settings.py::TestSettingsValidation::test_device_map_valid",
+ "tests/unit/test_settings.py::TestSettingsValidation::test_log_level_valid"
+ ]
Dockerfile CHANGED
@@ -1,45 +1,32 @@
- # Dockerfile optimized for HuggingFace Spaces
  FROM python:3.11-slim

- # Environment variables
- ENV PYTHONUNBUFFERED=1 \
-     PYTHONDONTWRITEBYTECODE=1 \
-     PORT=7860
+ ENV DEBIAN_FRONTEND=noninteractive
+ ENV CHRONOS_MODEL_ID=amazon/chronos-2
+ ENV DEVICE_MAP=cpu
+ ENV PYTHONUNBUFFERED=1

- # Working directory
  WORKDIR /app

- # Install system dependencies
- RUN apt-get update && \
-     apt-get install -y --no-install-recommends \
+ RUN apt-get update && apt-get install -y --no-install-recommends \
      build-essential \
      curl \
-     && rm -rf /var/lib/apt/lists/*
+     openssl \
+     && rm -rf /var/lib/apt/lists/*

- # Copy requirements
  COPY requirements.txt .

- # Install Python dependencies
- RUN pip install --no-cache-dir --upgrade pip && \
-     pip install --no-cache-dir -r requirements.txt
+ RUN pip install --no-cache-dir --upgrade pip \
+     && pip install --no-cache-dir -r requirements.txt

- # Copy application code
- COPY app/ ./app/
+ COPY app ./app

- # Copy Excel Add-in static files
- COPY static/ ./static/
+ # Create directory for certificates
+ RUN mkdir -p /app/certs

- # Create non-root user
- RUN useradd -m -u 1000 user && \
-     chown -R user:user /app
- USER user
+ EXPOSE 8000

- # Expose port (HF Spaces uses 7860 by default)
- EXPOSE 7860
+ # Startup script that generates certificates if missing and runs uvicorn
+ COPY entrypoint.sh /app/entrypoint.sh
+ RUN chmod +x /app/entrypoint.sh

- # Health check
- HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
-     CMD curl -f http://localhost:7860/health || exit 1
-
- # Startup command
- CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "7860"]
+ CMD ["/app/entrypoint.sh"]
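Note that the rewritten Dockerfile drops the old HEALTHCHECK instruction. A minimal sketch (not part of the commit) of probing the /health route from the host instead, assuming the service is reachable over plain HTTP at localhost:8000; adjust host, port, and scheme to match how the container is actually started (the entrypoint below defaults to HTTPS on port 7860 unless ENABLE_SSL=false).

import requests

# Hypothetical host-side health probe replacing the removed HEALTHCHECK.
resp = requests.get("http://localhost:8000/health", timeout=10)
resp.raise_for_status()
print(resp.json())  # e.g. {"status": "ok", "model_id": "amazon/chronos-2", "device_map": "cpu"}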
app/api/routes/__init__.py CHANGED
@@ -1,17 +0,0 @@
- """
- API Routes package.
-
- Contains all API endpoints, organized by functionality.
- """
-
- from .health import router as health_router
- from .forecast import router as forecast_router
- from .anomaly import router as anomaly_router
- from .backtest import router as backtest_router
-
- __all__ = [
-     "health_router",
-     "forecast_router",
-     "anomaly_router",
-     "backtest_router"
- ]
excel-forecasting-api/.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
excel-forecasting-api/Dockerfile ADDED
@@ -0,0 +1,34 @@
+ FROM python:3.11-slim
+
+ ENV DEBIAN_FRONTEND=noninteractive
+ ENV CHRONOS_MODEL_ID=amazon/chronos-2
+ ENV DEVICE_MAP=cpu
+ ENV PYTHONUNBUFFERED=1
+ ENV ENABLE_SSL=false
+
+ WORKDIR /app
+
+ RUN apt-get update && apt-get install -y --no-install-recommends \
+     build-essential \
+     curl \
+     openssl \
+     git \
+     && rm -rf /var/lib/apt/lists/*
+
+ COPY requirements.txt .
+
+ RUN pip install --no-cache-dir --upgrade pip \
+     && pip install --no-cache-dir -r requirements.txt
+
+ COPY app ./app
+
+ # Create directory for certificates
+ RUN mkdir -p /app/certs
+
+ EXPOSE 7860
+
+ # Startup script that generates certificates if missing and runs uvicorn
+ COPY entrypoint.sh /app/entrypoint.sh
+ RUN chmod +x /app/entrypoint.sh
+
+ CMD ["/app/entrypoint.sh"]
excel-forecasting-api/app/__init__.py ADDED
File without changes
excel-forecasting-api/app/main.py ADDED
@@ -0,0 +1,649 @@
1
+ import os
2
+ from typing import List, Dict, Optional
3
+
4
+ import numpy as np
5
+ import pandas as pd
6
+ from fastapi import FastAPI, HTTPException
7
+ from fastapi.middleware.cors import CORSMiddleware
8
+ from pydantic import BaseModel, Field
9
+
10
+ from chronos import Chronos2Pipeline
11
+
12
+
13
+ # =========================
14
+ # ConfiguraciΓ³n del modelo
15
+ # =========================
16
+
17
+ MODEL_ID = os.getenv("CHRONOS_MODEL_ID", "amazon/chronos-2")
18
+ DEVICE_MAP = os.getenv("DEVICE_MAP", "cpu") # "cpu" o "cuda"
19
+ ALLOWED_ORIGINS_ENV = os.getenv("ALLOWED_ORIGINS")
20
+ ALLOWED_ORIGINS = (
21
+ [origin.strip() for origin in ALLOWED_ORIGINS_ENV.split(",") if origin.strip()]
22
+ if ALLOWED_ORIGINS_ENV
23
+ else ["*"]
24
+ )
25
+
26
+ app = FastAPI(
27
+ title="Chronos-2 Universal Forecasting API",
28
+ description=(
29
+ "Servidor local (Docker) para pronΓ³sticos con Chronos-2: univariante, "
30
+ "multivariante, covariables, escenarios, anomalΓ­as y backtesting."
31
+ ),
32
+ version="1.0.0",
33
+ )
34
+
35
+ # Configurar CORS para Excel Add-in
36
+ app.add_middleware(
37
+ CORSMiddleware,
38
+ allow_origins=ALLOWED_ORIGINS,
39
+ allow_credentials=True,
40
+ allow_methods=["*"],
41
+ allow_headers=["*"],
42
+ )
43
+
44
+ # Carga ΓΊnica del modelo al iniciar el proceso
45
+ pipeline = Chronos2Pipeline.from_pretrained(MODEL_ID, device_map=DEVICE_MAP)
46
+
47
+
48
+ # =========================
49
+ # Modelos Pydantic comunes
50
+ # =========================
51
+
52
+ class BaseForecastConfig(BaseModel):
53
+ prediction_length: int = Field(
54
+ 7, description="Horizonte de predicciΓ³n (nΓΊmero de pasos futuros)"
55
+ )
56
+ quantile_levels: List[float] = Field(
57
+ default_factory=lambda: [0.1, 0.5, 0.9],
58
+ description="Cuantiles para el pronΓ³stico probabilΓ­stico",
59
+ )
60
+ start_timestamp: Optional[str] = Field(
61
+ default=None,
62
+ description=(
63
+ "Fecha/hora inicial del histΓ³rico (formato ISO). "
64
+ "Si no se especifica, se usan Γ­ndices enteros."
65
+ ),
66
+ )
67
+ freq: str = Field(
68
+ "D",
69
+ description="Frecuencia temporal (p.ej. 'D' diario, 'H' horario, 'W' semanal...).",
70
+ )
71
+
72
+
73
+ class UnivariateSeries(BaseModel):
74
+ values: List[float]
75
+
76
+
77
+ class MultiSeriesItem(BaseModel):
78
+ series_id: str
79
+ values: List[float]
80
+
81
+
82
+ class CovariatePoint(BaseModel):
83
+ """
84
+ Punto temporal usado tanto para contexto (histΓ³rico) como para covariables futuras.
85
+ """
86
+ timestamp: Optional[str] = None # opcional si se usan Γ­ndices enteros
87
+ id: Optional[str] = None # id de serie, por defecto 'series_0'
88
+ target: Optional[float] = None # valor de la variable objetivo (histΓ³rico)
89
+ covariates: Dict[str, float] = Field(
90
+ default_factory=dict,
91
+ description="Nombre -> valor de cada covariable dinΓ‘mica.",
92
+ )
93
+
94
+
95
+ # =========================
96
+ # 1) Healthcheck
97
+ # =========================
98
+
99
+ @app.get("/health")
100
+ def health():
101
+ """
102
+ Devuelve informaciΓ³n bΓ‘sica del estado del servidor y el modelo cargado.
103
+ """
104
+ return {
105
+ "status": "ok",
106
+ "model_id": MODEL_ID,
107
+ "device_map": DEVICE_MAP,
108
+ }
109
+
110
+
111
+ # =========================
112
+ # 2) PronΓ³stico univariante
113
+ # =========================
114
+
115
+ class ForecastUnivariateRequest(BaseForecastConfig):
116
+ series: UnivariateSeries
117
+
118
+
119
+ class ForecastUnivariateResponse(BaseModel):
120
+ timestamps: List[str]
121
+ median: List[float]
122
+ quantiles: Dict[str, List[float]] # "0.1" -> [..], "0.9" -> [..]
123
+
124
+
125
+ @app.post("/forecast_univariate", response_model=ForecastUnivariateResponse)
126
+ def forecast_univariate(req: ForecastUnivariateRequest):
127
+ """
128
+ PronΓ³stico para una sola serie temporal (univariante, sin covariables).
129
+ Pensado para uso directo desde Excel u otras herramientas sencillas.
130
+ """
131
+ values = req.series.values
132
+ n = len(values)
133
+ if n == 0:
134
+ raise HTTPException(status_code=400, detail="La serie no puede estar vacΓ­a.")
135
+
136
+ # Construimos contexto como DataFrame largo (id, timestamp, target)
137
+ if req.start_timestamp:
138
+ timestamps = pd.date_range(
139
+ start=pd.to_datetime(req.start_timestamp),
140
+ periods=n,
141
+ freq=req.freq,
142
+ )
143
+ else:
144
+ timestamps = pd.RangeIndex(start=0, stop=n, step=1)
145
+
146
+ context_df = pd.DataFrame(
147
+ {
148
+ "id": ["series_0"] * n,
149
+ "timestamp": timestamps,
150
+ "target": values,
151
+ }
152
+ )
153
+
154
+ pred_df = pipeline.predict_df(
155
+ context_df,
156
+ prediction_length=req.prediction_length,
157
+ quantile_levels=req.quantile_levels,
158
+ id_column="id",
159
+ timestamp_column="timestamp",
160
+ target="target",
161
+ )
162
+
163
+ pred_df = pred_df.sort_values("timestamp")
164
+ timestamps_out = pred_df["timestamp"].astype(str).tolist()
165
+ median = pred_df["predictions"].astype(float).tolist()
166
+
167
+ quantiles_dict: Dict[str, List[float]] = {}
168
+ for q in req.quantile_levels:
169
+ key = f"{q:.3g}"
170
+ if key in pred_df.columns:
171
+ quantiles_dict[key] = pred_df[key].astype(float).tolist()
172
+
173
+ return ForecastUnivariateResponse(
174
+ timestamps=timestamps_out,
175
+ median=median,
176
+ quantiles=quantiles_dict,
177
+ )
178
+
179
+
180
+ # =========================
181
+ # 3) Multi-serie (multi-id)
182
+ # =========================
183
+
184
+ class ForecastMultiSeriesRequest(BaseForecastConfig):
185
+ series_list: List[MultiSeriesItem]
186
+
187
+
188
+ class SeriesForecast(BaseModel):
189
+ series_id: str
190
+ timestamps: List[str]
191
+ median: List[float]
192
+ quantiles: Dict[str, List[float]]
193
+
194
+
195
+ class ForecastMultiSeriesResponse(BaseModel):
196
+ forecasts: List[SeriesForecast]
197
+
198
+
199
+ @app.post("/forecast_multi_id", response_model=ForecastMultiSeriesResponse)
200
+ def forecast_multi_id(req: ForecastMultiSeriesRequest):
201
+ """
202
+ PronΓ³stico para mΓΊltiples series (por ejemplo, varios SKU o tiendas).
203
+ """
204
+ if not req.series_list:
205
+ raise HTTPException(status_code=400, detail="Debes enviar al menos una serie.")
206
+
207
+ frames = []
208
+ for item in req.series_list:
209
+ n = len(item.values)
210
+ if n == 0:
211
+ continue
212
+ if req.start_timestamp:
213
+ timestamps = pd.date_range(
214
+ start=pd.to_datetime(req.start_timestamp),
215
+ periods=n,
216
+ freq=req.freq,
217
+ )
218
+ else:
219
+ timestamps = pd.RangeIndex(start=0, stop=n, step=1)
220
+
221
+ frames.append(
222
+ pd.DataFrame(
223
+ {
224
+ "id": [item.series_id] * n,
225
+ "timestamp": timestamps,
226
+ "target": item.values,
227
+ }
228
+ )
229
+ )
230
+
231
+ if not frames:
232
+ raise HTTPException(status_code=400, detail="Todas las series estΓ‘n vacΓ­as.")
233
+
234
+ context_df = pd.concat(frames, ignore_index=True)
235
+
236
+ pred_df = pipeline.predict_df(
237
+ context_df,
238
+ prediction_length=req.prediction_length,
239
+ quantile_levels=req.quantile_levels,
240
+ id_column="id",
241
+ timestamp_column="timestamp",
242
+ target="target",
243
+ )
244
+
245
+ forecasts: List[SeriesForecast] = []
246
+ for series_id, group in pred_df.groupby("id"):
247
+ group = group.sort_values("timestamp")
248
+ timestamps_out = group["timestamp"].astype(str).tolist()
249
+ median = group["predictions"].astype(float).tolist()
250
+ quantiles_dict: Dict[str, List[float]] = {}
251
+ for q in req.quantile_levels:
252
+ key = f"{q:.3g}"
253
+ if key in group.columns:
254
+ quantiles_dict[key] = group[key].astype(float).tolist()
255
+
256
+ forecasts.append(
257
+ SeriesForecast(
258
+ series_id=series_id,
259
+ timestamps=timestamps_out,
260
+ median=median,
261
+ quantiles=quantiles_dict,
262
+ )
263
+ )
264
+
265
+ return ForecastMultiSeriesResponse(forecasts=forecasts)
266
+
267
+
268
+ # =========================
269
+ # 4) PronΓ³stico con covariables
270
+ # =========================
271
+
272
+ class ForecastWithCovariatesRequest(BaseForecastConfig):
273
+ context: List[CovariatePoint]
274
+ future: Optional[List[CovariatePoint]] = None
275
+
276
+
277
+ class ForecastWithCovariatesResponse(BaseModel):
278
+ # filas con todas las columnas de pred_df serializadas como string
279
+ pred_df: List[Dict[str, str]]
280
+
281
+
282
+ @app.post("/forecast_with_covariates", response_model=ForecastWithCovariatesResponse)
283
+ def forecast_with_covariates(req: ForecastWithCovariatesRequest):
284
+ """
285
+ PronΓ³stico con informaciΓ³n de covariables (promos, precio, clima...) tanto
286
+ en el histΓ³rico (context) como en futuros posibles (future).
287
+ """
288
+ if not req.context:
289
+ raise HTTPException(status_code=400, detail="El contexto no puede estar vacΓ­o.")
290
+
291
+ ctx_rows = []
292
+ for p in req.context:
293
+ if p.target is None:
294
+ continue
295
+ row = {
296
+ "id": p.id or "series_0",
297
+ "timestamp": p.timestamp,
298
+ "target": p.target,
299
+ }
300
+ for k, v in p.covariates.items():
301
+ row[k] = v
302
+ ctx_rows.append(row)
303
+
304
+ context_df = pd.DataFrame(ctx_rows)
305
+ if "timestamp" not in context_df or context_df["timestamp"].isna().any():
306
+ context_df["timestamp"] = pd.RangeIndex(start=0, stop=len(context_df), step=1)
307
+
308
+ future_df = None
309
+ if req.future:
310
+ fut_rows = []
311
+ for p in req.future:
312
+ row = {
313
+ "id": p.id or "series_0",
314
+ "timestamp": p.timestamp,
315
+ }
316
+ for k, v in p.covariates.items():
317
+ row[k] = v
318
+ fut_rows.append(row)
319
+ future_df = pd.DataFrame(fut_rows)
320
+ if "timestamp" not in future_df or future_df["timestamp"].isna().any():
321
+ future_df["timestamp"] = pd.RangeIndex(
322
+ start=context_df["timestamp"].max() + 1,
323
+ stop=context_df["timestamp"].max() + 1 + len(future_df),
324
+ step=1,
325
+ )
326
+
327
+ pred_df = pipeline.predict_df(
328
+ context_df,
329
+ future_df=future_df,
330
+ prediction_length=req.prediction_length,
331
+ quantile_levels=req.quantile_levels,
332
+ id_column="id",
333
+ timestamp_column="timestamp",
334
+ target="target",
335
+ )
336
+
337
+ pred_df = pred_df.sort_values(["id", "timestamp"])
338
+ out_records: List[Dict[str, str]] = []
339
+ for _, row in pred_df.iterrows():
340
+ record = {k: str(v) for k, v in row.items()}
341
+ out_records.append(record)
342
+
343
+ return ForecastWithCovariatesResponse(pred_df=out_records)
344
+
345
+
346
+ # =========================
347
+ # 5) Multivariante (varios targets)
348
+ # =========================
349
+
350
+ class MultivariateContextPoint(BaseModel):
351
+ timestamp: Optional[str] = None
352
+ id: Optional[str] = None
353
+ targets: Dict[str, float] # p.ej. {"demand": 100, "returns": 5}
354
+ covariates: Dict[str, float] = Field(default_factory=dict)
355
+
356
+
357
+ class ForecastMultivariateRequest(BaseForecastConfig):
358
+ context: List[MultivariateContextPoint]
359
+ target_columns: List[str] # nombres de columnas objetivo
360
+
361
+
362
+ class ForecastMultivariateResponse(BaseModel):
363
+ pred_df: List[Dict[str, str]]
364
+
365
+
366
+ @app.post("/forecast_multivariate", response_model=ForecastMultivariateResponse)
367
+ def forecast_multivariate(req: ForecastMultivariateRequest):
368
+ """
369
+ PronΓ³stico multivariante: mΓΊltiples columnas objetivo (p.ej. demanda y devoluciones).
370
+ """
371
+ if not req.context:
372
+ raise HTTPException(status_code=400, detail="El contexto no puede estar vacΓ­o.")
373
+ if not req.target_columns:
374
+ raise HTTPException(status_code=400, detail="Debes indicar columnas objetivo.")
375
+
376
+ rows = []
377
+ for p in req.context:
378
+ base = {
379
+ "id": p.id or "series_0",
380
+ "timestamp": p.timestamp,
381
+ }
382
+ for t_name, t_val in p.targets.items():
383
+ base[t_name] = t_val
384
+ for k, v in p.covariates.items():
385
+ base[k] = v
386
+ rows.append(base)
387
+
388
+ context_df = pd.DataFrame(rows)
389
+ if "timestamp" not in context_df or context_df["timestamp"].isna().any():
390
+ context_df["timestamp"] = pd.RangeIndex(start=0, stop=len(context_df), step=1)
391
+
392
+ pred_df = pipeline.predict_df(
393
+ context_df,
394
+ prediction_length=req.prediction_length,
395
+ quantile_levels=req.quantile_levels,
396
+ id_column="id",
397
+ timestamp_column="timestamp",
398
+ target=req.target_columns,
399
+ )
400
+
401
+ pred_df = pred_df.sort_values(["id", "timestamp"])
402
+ out_records = [{k: str(v) for k, v in row.items()} for _, row in pred_df.iterrows()]
403
+ return ForecastMultivariateResponse(pred_df=out_records)
404
+
405
+
406
+ # =========================
407
+ # 6) Escenarios (what-if)
408
+ # =========================
409
+
410
+ class ScenarioDefinition(BaseModel):
411
+ name: str
412
+ future_covariates: List[CovariatePoint]
413
+
414
+
415
+ class ScenarioForecast(BaseModel):
416
+ name: str
417
+ pred_df: List[Dict[str, str]]
418
+
419
+
420
+ class ForecastScenariosRequest(BaseForecastConfig):
421
+ context: List[CovariatePoint]
422
+ scenarios: List[ScenarioDefinition]
423
+
424
+
425
+ class ForecastScenariosResponse(BaseModel):
426
+ scenarios: List[ScenarioForecast]
427
+
428
+
429
+ @app.post("/forecast_scenarios", response_model=ForecastScenariosResponse)
430
+ def forecast_scenarios(req: ForecastScenariosRequest):
431
+ """
432
+ EvaluaciΓ³n de mΓΊltiples escenarios (what-if) cambiando las covariables futuras
433
+ (por ejemplo, promo ON/OFF, diferentes precios, etc.).
434
+ """
435
+ if not req.context:
436
+ raise HTTPException(status_code=400, detail="El contexto no puede estar vacΓ­o.")
437
+ if not req.scenarios:
438
+ raise HTTPException(status_code=400, detail="Debes definir al menos un escenario.")
439
+
440
+ ctx_rows = []
441
+ for p in req.context:
442
+ if p.target is None:
443
+ continue
444
+ row = {
445
+ "id": p.id or "series_0",
446
+ "timestamp": p.timestamp,
447
+ "target": p.target,
448
+ }
449
+ for k, v in p.covariates.items():
450
+ row[k] = v
451
+ ctx_rows.append(row)
452
+
453
+ context_df = pd.DataFrame(ctx_rows)
454
+ if "timestamp" not in context_df or context_df["timestamp"].isna().any():
455
+ context_df["timestamp"] = pd.RangeIndex(start=0, stop=len(context_df), step=1)
456
+
457
+ results: List[ScenarioForecast] = []
458
+
459
+ for scen in req.scenarios:
460
+ fut_rows = []
461
+ for p in scen.future_covariates:
462
+ row = {
463
+ "id": p.id or "series_0",
464
+ "timestamp": p.timestamp,
465
+ }
466
+ for k, v in p.covariates.items():
467
+ row[k] = v
468
+ fut_rows.append(row)
469
+ future_df = pd.DataFrame(fut_rows)
470
+ if "timestamp" not in future_df or future_df["timestamp"].isna().any():
471
+ future_df["timestamp"] = pd.RangeIndex(
472
+ start=context_df["timestamp"].max() + 1,
473
+ stop=context_df["timestamp"].max() + 1 + len(future_df),
474
+ step=1,
475
+ )
476
+
477
+ pred_df = pipeline.predict_df(
478
+ context_df,
479
+ future_df=future_df,
480
+ prediction_length=req.prediction_length,
481
+ quantile_levels=req.quantile_levels,
482
+ id_column="id",
483
+ timestamp_column="timestamp",
484
+ target="target",
485
+ )
486
+ pred_df = pred_df.sort_values(["id", "timestamp"])
487
+ out_records = [{k: str(v) for k, v in row.items()} for _, row in pred_df.iterrows()]
488
+
489
+ results.append(ScenarioForecast(name=scen.name, pred_df=out_records))
490
+
491
+ return ForecastScenariosResponse(scenarios=results)
492
+
493
+
494
+ # =========================
495
+ # 7) DetecciΓ³n de anomalΓ­as
496
+ # =========================
497
+
498
+ class AnomalyDetectionRequest(BaseModel):
499
+ context: UnivariateSeries
500
+ recent_observed: List[float]
501
+ prediction_length: int = 7
502
+ quantile_low: float = 0.05
503
+ quantile_high: float = 0.95
504
+
505
+
506
+ class AnomalyPoint(BaseModel):
507
+ index: int
508
+ value: float
509
+ predicted_median: float
510
+ lower: float
511
+ upper: float
512
+ is_anomaly: bool
513
+
514
+
515
+ class AnomalyDetectionResponse(BaseModel):
516
+ anomalies: List[AnomalyPoint]
517
+
518
+
519
+ @app.post("/detect_anomalies", response_model=AnomalyDetectionResponse)
520
+ def detect_anomalies(req: AnomalyDetectionRequest):
521
+ """
522
+ Marca como anomalΓ­as los puntos observados recientes que caen fuera del
523
+ intervalo [quantile_low, quantile_high] del pronΓ³stico.
524
+ """
525
+ n_hist = len(req.context.values)
526
+ if n_hist == 0:
527
+ raise HTTPException(status_code=400, detail="La serie histΓ³rica no puede estar vacΓ­a.")
528
+ if len(req.recent_observed) != req.prediction_length:
529
+ raise HTTPException(
530
+ status_code=400,
531
+ detail="recent_observed debe tener la misma longitud que prediction_length.",
532
+ )
533
+
534
+ context_df = pd.DataFrame(
535
+ {
536
+ "id": ["series_0"] * n_hist,
537
+ "timestamp": pd.RangeIndex(start=0, stop=n_hist, step=1),
538
+ "target": req.context.values,
539
+ }
540
+ )
541
+
542
+ quantiles = sorted({req.quantile_low, 0.5, req.quantile_high})
543
+ pred_df = pipeline.predict_df(
544
+ context_df,
545
+ prediction_length=req.prediction_length,
546
+ quantile_levels=quantiles,
547
+ id_column="id",
548
+ timestamp_column="timestamp",
549
+ target="target",
550
+ ).sort_values("timestamp")
551
+
552
+ q_low_col = f"{req.quantile_low:.3g}"
553
+ q_high_col = f"{req.quantile_high:.3g}"
554
+
555
+ anomalies: List[AnomalyPoint] = []
556
+ for i, (obs, (_, row)) in enumerate(zip(req.recent_observed, pred_df.iterrows())):
557
+ lower = float(row[q_low_col])
558
+ upper = float(row[q_high_col])
559
+ median = float(row["predictions"])
560
+ is_anom = (obs < lower) or (obs > upper)
561
+ anomalies.append(
562
+ AnomalyPoint(
563
+ index=i,
564
+ value=obs,
565
+ predicted_median=median,
566
+ lower=lower,
567
+ upper=upper,
568
+ is_anomaly=is_anom,
569
+ )
570
+ )
571
+
572
+ return AnomalyDetectionResponse(anomalies=anomalies)
573
+
574
+
575
+ # =========================
576
+ # 8) Backtest simple
577
+ # =========================
578
+
579
+ class BacktestRequest(BaseModel):
580
+ series: UnivariateSeries
581
+ prediction_length: int = 7
582
+ test_length: int = 28
583
+
584
+
585
+ class BacktestMetrics(BaseModel):
586
+ mae: float
587
+ mape: float
588
+ wql: float # Weighted Quantile Loss aproximada para el cuantil 0.5
589
+
590
+
591
+ class BacktestResponse(BaseModel):
592
+ metrics: BacktestMetrics
593
+ forecast_median: List[float]
594
+ forecast_timestamps: List[str]
595
+ actuals: List[float]
596
+
597
+
598
+ @app.post("/backtest_simple", response_model=BacktestResponse)
599
+ def backtest_simple(req: BacktestRequest):
600
+ """
601
+ Backtest sencillo: separamos un tramo final de la serie como test, pronosticamos
602
+ ese tramo y calculamos mΓ©tricas MAE / MAPE / WQL.
603
+ """
604
+ values = np.array(req.series.values, dtype=float)
605
+ n = len(values)
606
+ if n <= req.test_length:
607
+ raise HTTPException(
608
+ status_code=400,
609
+ detail="La serie debe ser mΓ‘s larga que test_length.",
610
+ )
611
+
612
+ train = values[: n - req.test_length]
613
+ test = values[n - req.test_length :]
614
+
615
+ context_df = pd.DataFrame(
616
+ {
617
+ "id": ["series_0"] * len(train),
618
+ "timestamp": pd.RangeIndex(start=0, stop=len(train), step=1),
619
+ "target": train.tolist(),
620
+ }
621
+ )
622
+
623
+ pred_df = pipeline.predict_df(
624
+ context_df,
625
+ prediction_length=req.test_length,
626
+ quantile_levels=[0.5],
627
+ id_column="id",
628
+ timestamp_column="timestamp",
629
+ target="target",
630
+ ).sort_values("timestamp")
631
+
632
+ forecast = pred_df["predictions"].to_numpy(dtype=float)
633
+ timestamps = pred_df["timestamp"].astype(str).tolist()
634
+
635
+ mae = float(np.mean(np.abs(test - forecast)))
636
+ eps = 1e-8
637
+ mape = float(np.mean(np.abs((test - forecast) / (test + eps)))) * 100.0
638
+ tau = 0.5
639
+ diff = test - forecast
640
+ wql = float(np.mean(np.maximum(tau * diff, (tau - 1) * diff)))
641
+
642
+ metrics = BacktestMetrics(mae=mae, mape=mape, wql=wql)
643
+
644
+ return BacktestResponse(
645
+ metrics=metrics,
646
+ forecast_median=forecast.tolist(),
647
+ forecast_timestamps=timestamps,
648
+ actuals=test.tolist(),
649
+ )
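For reference, a minimal client sketch (not part of the commit) for the /forecast_univariate endpoint defined in app/main.py above. It assumes the API is reachable at http://localhost:8000; adjust host, port, and scheme to your deployment (the model itself is loaded once at container startup).

import requests

payload = {
    "series": {"values": [120, 130, 128, 135, 142, 150, 149, 155, 160, 158]},
    "prediction_length": 7,
    "quantile_levels": [0.1, 0.5, 0.9],
    "start_timestamp": "2024-01-01",
    "freq": "D",
}

resp = requests.post("http://localhost:8000/forecast_univariate", json=payload, timeout=120)
resp.raise_for_status()
data = resp.json()

print(data["timestamps"])        # forecast dates
print(data["median"])            # median (0.5 quantile) forecast
print(data["quantiles"]["0.9"])  # upper quantile band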
excel-forecasting-api/entrypoint.sh ADDED
@@ -0,0 +1,33 @@
+ #!/bin/bash
+ set -euo pipefail
+
+ PORT="${PORT:-7860}"
+ ENABLE_SSL="${ENABLE_SSL:-true}"
+
+ mkdir -p /app/certs
+
+ if [ "${ENABLE_SSL}" = "true" ]; then
+     if [ ! -f /app/certs/server.key ] || [ ! -f /app/certs/server.crt ]; then
+         echo "Generating self-signed SSL certificates"
+         openssl req -x509 -newkey rsa:2048 \
+             -keyout /app/certs/server.key \
+             -out /app/certs/server.crt \
+             -days 365 -nodes \
+             -subj "/C=US/ST=State/L=City/O=Chronos2/CN=localhost"
+         chmod 644 /app/certs/server.*
+     else
+         echo "Reusing existing SSL certificates"
+     fi
+
+     echo "Starting HTTPS server on port ${PORT}"
+     exec uvicorn app.main:app \
+         --host 0.0.0.0 \
+         --port "${PORT}" \
+         --ssl-keyfile /app/certs/server.key \
+         --ssl-certfile /app/certs/server.crt
+ else
+     echo "Starting HTTP server on port ${PORT}"
+     exec uvicorn app.main:app \
+         --host 0.0.0.0 \
+         --port "${PORT}"
+ fi
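When ENABLE_SSL is left at its default of true, entrypoint.sh serves uvicorn behind the self-signed certificate it just generated, so clients have to skip certificate verification. A minimal sketch (local testing only) of calling the HTTPS endpoint on the default port 7860:

import requests
import urllib3

# The certificate is self-signed, so disable verification and silence the warning.
# Do not do this against anything other than a local test container.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

resp = requests.get("https://localhost:7860/health", verify=False, timeout=10)
print(resp.json())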
excel-forecasting-api/requirements.txt ADDED
@@ -0,0 +1,9 @@
+ --index-url https://download.pytorch.org/whl/cpu
+ --extra-index-url https://pypi.org/simple
+
+ torch==2.9.0+cpu
+ chronos-forecasting>=2.0.0
+ pandas[pyarrow]
+ fastapi
+ uvicorn[standard]
+ numpy
pytest.ini ADDED
@@ -0,0 +1,22 @@
+ [pytest]
+ # Pytest configuration
+ testpaths = tests
+ python_files = test_*.py
+ python_classes = Test*
+ python_functions = test_*
+
+ # Output options
+ addopts =
+     -v
+     --strict-markers
+     --tb=short
+     --disable-warnings
+
+ # Coverage options (when pytest-cov is used)
+ # addopts += --cov=app --cov-report=html --cov-report=term-missing
+
+ # Markers
+ markers =
+     unit: Unit tests
+     integration: Integration tests
+     slow: Slow tests
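For illustration, a hypothetical test file that this configuration would collect (it follows the testpaths/test_*.py discovery rules and uses the `unit` marker, which --strict-markers requires to be declared as above):

# tests/unit/test_example.py (hypothetical, not part of the commit)
import pytest


@pytest.mark.unit
class TestExample:
    def test_addition(self):
        assert 1 + 1 == 2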
static/taskpane/taskpane.css CHANGED
@@ -306,3 +306,223 @@ body {
  .results-log::-webkit-scrollbar-thumb:hover {
      background: #555;
  }
309
+
310
+ /* ====================================================================
311
+ FORECAST PREVIEW & COPY TO CLIPBOARD
312
+ ==================================================================== */
313
+
314
+ /* Forecast Preview Card */
315
+ .forecast-preview-card {
316
+ background: white;
317
+ border-radius: 12px;
318
+ padding: 16px;
319
+ margin-bottom: 20px;
320
+ box-shadow: 0 4px 12px rgba(0,0,0,0.1);
321
+ border: 2px solid #667eea;
322
+ animation: slideIn 0.3s ease-out;
323
+ }
324
+
325
+ @keyframes slideIn {
326
+ from {
327
+ opacity: 0;
328
+ transform: translateY(-10px);
329
+ }
330
+ to {
331
+ opacity: 1;
332
+ transform: translateY(0);
333
+ }
334
+ }
335
+
336
+ .preview-header {
337
+ display: flex;
338
+ justify-content: space-between;
339
+ align-items: center;
340
+ margin-bottom: 12px;
341
+ padding-bottom: 12px;
342
+ border-bottom: 2px solid #f0f0f0;
343
+ }
344
+
345
+ .preview-header h3 {
346
+ font-size: 16px;
347
+ font-weight: 600;
348
+ color: #667eea;
349
+ margin: 0;
350
+ }
351
+
352
+ .preview-count {
353
+ font-size: 12px;
354
+ background: #667eea;
355
+ color: white;
356
+ padding: 4px 12px;
357
+ border-radius: 12px;
358
+ font-weight: 500;
359
+ }
360
+
361
+ .preview-table-container {
362
+ max-height: 200px;
363
+ overflow-y: auto;
364
+ margin-bottom: 12px;
365
+ border-radius: 6px;
366
+ border: 1px solid #e5e7eb;
367
+ }
368
+
369
+ .preview-table {
370
+ width: 100%;
371
+ border-collapse: collapse;
372
+ font-size: 12px;
373
+ }
374
+
375
+ .preview-table thead {
376
+ position: sticky;
377
+ top: 0;
378
+ background: #f9fafb;
379
+ z-index: 1;
380
+ }
381
+
382
+ .preview-table th {
383
+ padding: 8px;
384
+ text-align: left;
385
+ font-weight: 600;
386
+ color: #374151;
387
+ border-bottom: 2px solid #e5e7eb;
388
+ }
389
+
390
+ .preview-table td {
391
+ padding: 8px;
392
+ border-bottom: 1px solid #f0f0f0;
393
+ color: #4b5563;
394
+ }
395
+
396
+ .preview-table tbody tr:hover {
397
+ background: #f9fafb;
398
+ }
399
+
400
+ .preview-table .preview-more td {
401
+ text-align: center;
402
+ font-style: italic;
403
+ color: #9ca3af;
404
+ }
405
+
406
+ .preview-actions {
407
+ display: flex;
408
+ flex-direction: column;
409
+ gap: 8px;
410
+ }
411
+
412
+ .btn-copy-forecast {
413
+ width: 100%;
414
+ font-size: 14px;
415
+ font-weight: 600;
416
+ padding: 12px;
417
+ background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
418
+ box-shadow: 0 2px 8px rgba(102, 126, 234, 0.3);
419
+ }
420
+
421
+ .btn-copy-forecast:hover {
422
+ transform: translateY(-1px);
423
+ box-shadow: 0 4px 12px rgba(102, 126, 234, 0.4);
424
+ }
425
+
426
+ .preview-hint {
427
+ text-align: center;
428
+ font-size: 11px;
429
+ color: #6b7280;
430
+ padding: 8px;
431
+ background: #f9fafb;
432
+ border-radius: 6px;
433
+ }
434
+
435
+ /* Copy Toast Notification */
436
+ .copy-toast {
437
+ position: fixed;
438
+ top: 20px;
439
+ right: 20px;
440
+ z-index: 9999;
441
+ background: linear-gradient(135deg, #10b981 0%, #059669 100%);
442
+ color: white;
443
+ padding: 16px 20px;
444
+ border-radius: 12px;
445
+ box-shadow: 0 8px 24px rgba(16, 185, 129, 0.4);
446
+ opacity: 0;
447
+ transform: translateX(100px);
448
+ transition: all 0.3s ease-out;
449
+ }
450
+
451
+ .copy-toast.show {
452
+ opacity: 1;
453
+ transform: translateX(0);
454
+ }
455
+
456
+ .toast-content {
457
+ display: flex;
458
+ align-items: center;
459
+ gap: 12px;
460
+ }
461
+
462
+ .toast-icon {
463
+ font-size: 24px;
464
+ }
465
+
466
+ .toast-text {
467
+ font-size: 14px;
468
+ font-weight: 600;
469
+ }
470
+
471
+ /* Copy Fallback Modal */
472
+ .copy-fallback-modal {
473
+ position: fixed;
474
+ top: 0;
475
+ left: 0;
476
+ right: 0;
477
+ bottom: 0;
478
+ z-index: 10000;
479
+ background: rgba(0, 0, 0, 0.5);
480
+ display: flex;
481
+ align-items: center;
482
+ justify-content: center;
483
+ padding: 20px;
484
+ animation: fadeIn 0.2s;
485
+ }
486
+
487
+ @keyframes fadeIn {
488
+ from { opacity: 0; }
489
+ to { opacity: 1; }
490
+ }
491
+
492
+ .copy-fallback-modal .modal-content {
493
+ background: white;
494
+ border-radius: 12px;
495
+ padding: 24px;
496
+ max-width: 500px;
497
+ width: 100%;
498
+ box-shadow: 0 20px 60px rgba(0,0,0,0.3);
499
+ }
500
+
501
+ .copy-fallback-modal h3 {
502
+ margin: 0 0 12px 0;
503
+ font-size: 18px;
504
+ color: #1f2937;
505
+ }
506
+
507
+ .copy-fallback-modal p {
508
+ margin: 0 0 12px 0;
509
+ font-size: 13px;
510
+ color: #6b7280;
511
+ }
512
+
513
+ .copy-textarea {
514
+ width: 100%;
515
+ height: 200px;
516
+ padding: 12px;
517
+ border: 2px solid #e5e7eb;
518
+ border-radius: 6px;
519
+ font-family: 'Courier New', monospace;
520
+ font-size: 11px;
521
+ resize: vertical;
522
+ margin-bottom: 12px;
523
+ }
524
+
525
+ .copy-textarea:focus {
526
+ outline: none;
527
+ border-color: #667eea;
528
+ }
static/taskpane/taskpane.js CHANGED
@@ -136,6 +136,213 @@ async function writeToRange(data, startCell) {
   });
 }

+ // ====================================================================
+ // COPY TO CLIPBOARD FUNCTIONALITY
+ // ====================================================================
+
+ /**
+  * Format forecast results as TSV (Tab-Separated Values)
+  * Excel automatically recognizes TSV and creates a table
+  */
+ function formatForecastAsTSV(timestamps, median, q10, q90) {
+   // Header row
+   let tsv = 'Date\tForecast\tLower 10%\tUpper 90%\n';
+
+   // Data rows
+   for (let i = 0; i < timestamps.length; i++) {
+     const date = timestamps[i] || `Period ${i + 1}`;
+     const med = median[i]?.toFixed(2) || '';
+     const lower = q10[i]?.toFixed(2) || '';
+     const upper = q90[i]?.toFixed(2) || '';
+
+     tsv += `${date}\t${med}\t${lower}\t${upper}\n`;
+   }
+
+   return tsv;
+ }
+
+ /**
+  * Copy forecast results to clipboard
+  */
+ async function copyForecastToClipboard(timestamps, median, q10, q90) {
+   try {
+     console.log('[copyToClipboard] Formatting data...');
+
+     // Format as TSV
+     const tsv = formatForecastAsTSV(timestamps, median, q10, q90);
+
+     console.log('[copyToClipboard] TSV length:', tsv.length);
+     console.log('[copyToClipboard] Preview:', tsv.substring(0, 200));
+
+     // Copy to clipboard using Clipboard API
+     await navigator.clipboard.writeText(tsv);
+
+     console.log('[copyToClipboard] ✅ Copied successfully');
+
+     // User feedback
+     log('✅ Forecast copied to clipboard! Paste in Excel with Ctrl+V', 'success');
+     showCopySuccessNotification();
+
+     return true;
+   } catch (error) {
+     console.error('[copyToClipboard] ❌ Error:', error);
+
+     // Fallback: Show modal with selectable text
+     showCopyFallbackModal(formatForecastAsTSV(timestamps, median, q10, q90));
+     log('⚠️ Please select and copy the text manually', 'warning');
+
+     return false;
+   }
+ }
+
+ /**
+  * Show temporary success notification
+  */
+ function showCopySuccessNotification() {
+   // Create toast notification
+   const toast = document.createElement('div');
+   toast.className = 'copy-toast';
+   toast.innerHTML = `
+     <div class="toast-content">
+       <span class="toast-icon">📋</span>
+       <span class="toast-text">Copied to clipboard!</span>
+     </div>
+   `;
+
+   document.body.appendChild(toast);
+
+   // Animate in
+   setTimeout(() => toast.classList.add('show'), 10);
+
+   // Remove after 3 seconds
+   setTimeout(() => {
+     toast.classList.remove('show');
+     setTimeout(() => toast.remove(), 300);
+   }, 3000);
+ }
+
+ /**
+  * Show fallback modal if clipboard API fails
+  */
+ function showCopyFallbackModal(text) {
+   // Create modal
+   const modal = document.createElement('div');
+   modal.className = 'copy-fallback-modal';
+   modal.innerHTML = `
+     <div class="modal-content">
+       <h3>Copy Forecast Results</h3>
+       <p>Select all text below and copy (Ctrl+C or Cmd+C):</p>
+       <textarea readonly class="copy-textarea">${text}</textarea>
+       <button onclick="this.parentElement.parentElement.remove()" class="btn btn-secondary">
+         Close
+       </button>
+     </div>
+   `;
+
+   document.body.appendChild(modal);
+
+   // Auto-select text
+   const textarea = modal.querySelector('.copy-textarea');
+   textarea.focus();
+   textarea.select();
+ }
+
+ /**
+  * Show forecast preview with copy button
+  */
+ function showForecastPreview(forecastData) {
+   const { timestamps, median, q10, q90 } = forecastData;
+
+   // Create preview HTML
+   let previewHTML = `
+     <div class="forecast-preview-card">
+       <div class="preview-header">
+         <h3>📊 Forecast Preview</h3>
+         <span class="preview-count">${timestamps.length} periods</span>
+       </div>
+       <div class="preview-table-container">
+         <table class="preview-table">
+           <thead>
+             <tr>
+               <th>Date</th>
+               <th>Forecast</th>
+               <th>Lower</th>
+               <th>Upper</th>
+             </tr>
+           </thead>
+           <tbody>
+   `;
+
+   // Show first 5 rows
+   const displayRows = Math.min(5, timestamps.length);
+   for (let i = 0; i < displayRows; i++) {
+     previewHTML += `
+       <tr>
+         <td>${timestamps[i] || `P${i+1}`}</td>
+         <td>${median[i].toFixed(2)}</td>
+         <td>${q10[i].toFixed(2)}</td>
+         <td>${q90[i].toFixed(2)}</td>
+       </tr>
+     `;
+   }
+
+   if (timestamps.length > 5) {
+     previewHTML += `
+       <tr class="preview-more">
+         <td colspan="4">... and ${timestamps.length - 5} more rows</td>
+       </tr>
+     `;
+   }
+
+   previewHTML += `
+           </tbody>
+         </table>
+       </div>
+       <div class="preview-actions">
+         <button class="btn btn-primary btn-copy-forecast" onclick="copyLastForecast()">
+           📋 Copy to Clipboard
+         </button>
+         <div class="preview-hint">
+           💡 Click to copy, then paste in Excel with Ctrl+V
+         </div>
+       </div>
+     </div>
+   `;
+
+   // Find or create preview container
+   let previewContainer = document.getElementById('forecast-preview');
+   if (!previewContainer) {
+     previewContainer = document.createElement('div');
+     previewContainer.id = 'forecast-preview';
+
+     // Insert after results log
+     const resultsCard = document.querySelector('.results-card');
+     if (resultsCard) {
+       resultsCard.parentNode.insertBefore(previewContainer, resultsCard);
+     } else {
+       document.querySelector('.container').appendChild(previewContainer);
+     }
+   }
+
+   previewContainer.innerHTML = previewHTML;
+
+   // Scroll to preview
+   previewContainer.scrollIntoView({ behavior: 'smooth', block: 'nearest' });
+ }
+
+ /**
+  * Copy last forecast (called from button)
+  */
+ function copyLastForecast() {
+   if (!window.lastForecastData) {
+     log('⚠️ No forecast data available to copy', 'warning');
+     return;
+   }
+
+   const { timestamps, median, q10, q90 } = window.lastForecastData;
+   copyForecastToClipboard(timestamps, median, q10, q90);
+ }
+
 async function writeForecastResults(timestamps, median, q10, q90, startRow) {
   return Excel.run(async (context) => {
     try {

@@ -248,6 +455,17 @@ async function forecastUnivariate() {

   log(`Received forecast for ${data.timestamps.length} periods`, 'success');

+   // Store forecast data globally for copy function
+   window.lastForecastData = {
+     timestamps: data.timestamps,
+     median: data.median,
+     q10: data.quantiles['0.1'],
+     q90: data.quantiles['0.9']
+   };
+
+   // Show preview with copy button
+   showForecastPreview(window.lastForecastData);
+
   // Write results
   await Excel.run(async (context) => {
     const selection = context.workbook.getSelectedRange();
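For reference, a small Python sketch (not part of the add-in) of the tab-separated block that formatForecastAsTSV() above places on the clipboard; pasting text in this shape into Excel yields one row per period with Date / Forecast / Lower 10% / Upper 90% columns.

def forecast_to_tsv(timestamps, median, q10, q90):
    # Header row, then one tab-separated row per forecast period.
    lines = ["Date\tForecast\tLower 10%\tUpper 90%"]
    for i, date in enumerate(timestamps):
        lines.append(f"{date}\t{median[i]:.2f}\t{q10[i]:.2f}\t{q90[i]:.2f}")
    return "\n".join(lines) + "\n"

# Example with a /forecast_univariate response `data`:
# forecast_to_tsv(data["timestamps"], data["median"],
#                 data["quantiles"]["0.1"], data["quantiles"]["0.9"])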