Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
{
"name": "test-workspace",
"private": true,
"scripts": {
"build": "vp run -r build"
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
{
"name": "@test/a",
"version": "1.0.0",
"scripts": {
"build": "echo building-a"
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
{
"name": "@test/b",
"version": "1.0.0",
"scripts": {
"build": "echo building-b"
},
"dependencies": {
"@test/a": "workspace:*"
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
packages:
- 'packages/*'
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# Tests that workspace root self-referencing tasks don't cause infinite recursion.
# Root build = `vp run -r build` (delegates to all packages recursively).
#
# Skip rule: `vp run -r build` from root produces the same query as the
# nested `vp run -r build` in root's script, so root's expansion is skipped.
# Only packages a and b actually run.
#
# Prune rule: `vp run build` from root produces a ContainingPackage query,
# but root's script `vp run -r build` produces an All query. The queries
# differ so the skip rule doesn't fire. Instead the prune rule removes root
# from the nested result, leaving only a and b.

[[e2e]]
name = "recursive build skips root self-reference"
steps = [
"vp run -r build # only a and b run, root is skipped",
]

[[e2e]]
name = "build from root prunes root from nested expansion"
steps = [
"vp run build # only a and b run under root, root is pruned",
]
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
---
source: crates/vite_task_bin/tests/e2e_snapshots/main.rs
expression: e2e_outputs
---
> vp run build # only a and b run under root, root is pruned
~/packages/a$ echo building-a ⊘ cache disabled
building-a

~/packages/b$ echo building-b ⊘ cache disabled
building-b

---
[vp run] 0/2 cache hit (0%). (Run `vp run --last-details` for full details)
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
---
source: crates/vite_task_bin/tests/e2e_snapshots/main.rs
expression: e2e_outputs
---
> vp run -r build # only a and b run, root is skipped
~/packages/a$ echo building-a ⊘ cache disabled
building-a

~/packages/b$ echo building-b ⊘ cache disabled
building-b

---
[vp run] 0/2 cache hit (0%). (Run `vp run --last-details` for full details)
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
{
"cache": true
}
15 changes: 14 additions & 1 deletion crates/vite_task_graph/src/config/user.rs
Original file line number Diff line number Diff line change
Expand Up @@ -177,7 +177,7 @@ impl ResolvedGlobalCacheConfig {
#[derive(Debug, Default, Deserialize)]
// TS derive macro generates code using std types that clippy disallows; skip derive during linting
#[cfg_attr(all(test, not(clippy)), derive(TS), ts(optional_fields, rename = "RunConfig"))]
#[serde(rename_all = "camelCase")]
#[serde(deny_unknown_fields, rename_all = "camelCase")]
pub struct UserRunConfig {
/// Root-level cache configuration.
///
Expand Down Expand Up @@ -454,4 +454,17 @@ mod tests {
serde_json::from_value::<UserGlobalCacheConfig>(json!({ "unknown": true })).is_err()
);
}

#[test]
// A run config containing a key we don't recognize must fail to
// deserialize (deny_unknown_fields), rather than being silently accepted.
fn test_run_config_unknown_top_level_field() {
    let parsed = serde_json::from_value::<UserRunConfig>(json!({ "unknown": true }));
    assert!(parsed.is_err());
}

#[test]
// Unknown keys inside an individual task entry are rejected as well,
// even when the rest of the entry (here, `command`) is valid.
fn test_task_config_unknown_field() {
    let value = json!({ "command": "echo", "unknown": true });
    let parsed = serde_json::from_value::<UserTaskConfig>(value);
    assert!(parsed.is_err());
}
}
15 changes: 14 additions & 1 deletion crates/vite_task_graph/src/query/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,20 @@ use crate::{IndexedTaskGraph, TaskDependencyType, TaskId, TaskNodeIndex};
pub type TaskExecutionGraph = DiGraphMap<TaskNodeIndex, ()>;

/// A query for which tasks to run.
#[derive(Debug)]
///
/// A `TaskQuery` must be **self-contained**: it fully describes which tasks
/// will be selected, without relying on ambient state such as cwd or
/// environment variables. For example, the implicit cwd is captured as a
/// `ContainingPackage(path)` selector inside [`PackageQuery`], so two
/// queries from different directories compare as unequal even though the
/// user typed the same CLI arguments.
///
/// This property is essential for the **skip rule** in task planning, which
/// compares the nested query against the parent query with `==`. If any
/// external context leaked into the comparison (or was excluded from it),
/// the skip rule would either miss legitimate recursion or incorrectly
/// suppress distinct expansions.
#[derive(Debug, PartialEq)]
pub struct TaskQuery {
/// Which packages to select.
pub package_query: PackageQuery,
Expand Down
25 changes: 24 additions & 1 deletion crates/vite_task_plan/src/context.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,9 @@ use std::{env::JoinPathsError, ffi::OsStr, ops::Range, sync::Arc};
use rustc_hash::FxHashMap;
use vite_path::AbsolutePath;
use vite_str::Str;
use vite_task_graph::{IndexedTaskGraph, TaskNodeIndex, config::ResolvedGlobalCacheConfig};
use vite_task_graph::{
IndexedTaskGraph, TaskNodeIndex, config::ResolvedGlobalCacheConfig, query::TaskQuery,
};

use crate::{PlanRequestParser, path_env::prepend_path_env};

Expand Down Expand Up @@ -42,6 +44,10 @@ pub struct PlanContext<'a> {

/// Final resolved global cache config, combining the graph's config with any CLI override.
resolved_global_cache: ResolvedGlobalCacheConfig,

/// The query that caused the current expansion.
/// Used by the skip rule to detect and skip duplicate nested expansions.
parent_query: Arc<TaskQuery>,
}

impl<'a> PlanContext<'a> {
Expand All @@ -52,6 +58,7 @@ impl<'a> PlanContext<'a> {
callbacks: &'a mut (dyn PlanRequestParser + 'a),
indexed_task_graph: &'a IndexedTaskGraph,
resolved_global_cache: ResolvedGlobalCacheConfig,
parent_query: Arc<TaskQuery>,
) -> Self {
Self {
workspace_path,
Expand All @@ -62,6 +69,7 @@ impl<'a> PlanContext<'a> {
indexed_task_graph,
extra_args: Arc::default(),
resolved_global_cache,
parent_query,
}
}

Expand Down Expand Up @@ -128,6 +136,20 @@ impl<'a> PlanContext<'a> {
self.resolved_global_cache = config;
}

/// The query that triggered the current expansion.
///
/// Compared (via `==`) against nested queries by the skip rule to detect
/// a nested expansion that would duplicate its parent.
pub fn parent_query(&self) -> &TaskQuery {
    self.parent_query.as_ref()
}

/// Records `query` as the parent of any expansion performed from here on,
/// so subsequent nested queries are compared against it by the skip rule.
pub fn set_parent_query(&mut self, query: Arc<TaskQuery>) {
    self.parent_query = query;
}

/// The task whose command is currently being expanded into a nested query.
///
/// This is the top of the task call stack; at the top level no task is
/// mid-expansion, so `None` is returned.
pub fn expanding_task(&self) -> Option<TaskNodeIndex> {
    self.task_call_stack.last().map(|&(index, _)| index)
}

pub fn duplicate(&mut self) -> PlanContext<'_> {
PlanContext {
workspace_path: self.workspace_path,
Expand All @@ -138,6 +160,7 @@ impl<'a> PlanContext<'a> {
indexed_task_graph: self.indexed_task_graph,
extra_args: Arc::clone(&self.extra_args),
resolved_global_cache: self.resolved_global_cache,
parent_query: Arc::clone(&self.parent_query),
}
}
}
5 changes: 4 additions & 1 deletion crates/vite_task_plan/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -205,15 +205,18 @@ pub async fn plan_query(
query_plan_request.plan_options.cache_override,
);

let QueryPlanRequest { query, plan_options } = query_plan_request;
let query = Arc::new(query);
let context = PlanContext::new(
workspace_path,
Arc::clone(cwd),
envs.clone(),
plan_request_parser,
indexed_task_graph,
resolved_global_cache,
Arc::clone(&query),
);
plan_query_request(query_plan_request, context).await
plan_query_request(query, plan_options, context).await
}

const fn resolve_cache_with_override(
Expand Down
91 changes: 77 additions & 14 deletions crates/vite_task_plan/src/plan.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ use std::{
};

use futures_util::FutureExt;
use petgraph::Direction;
use rustc_hash::FxHashMap;
use vite_path::{AbsolutePath, AbsolutePathBuf, RelativePathBuf, relative::InvalidPathDataError};
use vite_shell::try_parse_as_and_list;
Expand All @@ -22,6 +23,7 @@ use vite_task_graph::{
CacheConfig, ResolvedGlobalCacheConfig, ResolvedTaskOptions,
user::{UserCacheConfig, UserTaskOptions},
},
query::TaskQuery,
};

use crate::{
Expand All @@ -34,7 +36,8 @@ use crate::{
in_process::InProcessExecution,
path_env::get_path_env,
plan_request::{
CacheOverride, PlanRequest, QueryPlanRequest, ScriptCommand, SyntheticPlanRequest,
CacheOverride, PlanOptions, PlanRequest, QueryPlanRequest, ScriptCommand,
SyntheticPlanRequest,
},
resolve_cache_with_override,
};
Expand Down Expand Up @@ -197,17 +200,34 @@ async fn plan_task_as_execution_node(
let execution_item_kind: ExecutionItemKind = match plan_request {
// Expand task query like `vp run -r build`
Some(PlanRequest::Query(query_plan_request)) => {
// Skip rule: skip if this nested query is the same as the parent expansion.
// This handles workspace root tasks like `"build": "vp run -r build"` —
// re-entering the same query would just re-expand the same tasks.
//
// The comparison is on TaskQuery only (package_query + task_name +
// include_explicit_deps). Extra args live in PlanOptions, so
// `vp run -r build extra_arg` still matches `vp run -r build`.
// Conversely, `cd packages/a && vp run build` does NOT match a
// parent `vp run build` from root because `cd` changes the cwd,
// producing a different ContainingPackage in the PackageQuery.
if query_plan_request.query == *context.parent_query() {
continue;
}

// Save task name before consuming the request
let task_name = query_plan_request.query.task_name.clone();
// Add prefix envs to the context
context.add_envs(and_item.envs.iter());
let execution_graph = plan_query_request(query_plan_request, context)
.await
.map_err(|error| Error::NestPlan {
task_display: task_node.task_display.clone(),
command: Str::from(&command_str[add_item_span.clone()]),
error: Box::new(error),
})?;
let QueryPlanRequest { query, plan_options } = query_plan_request;
let query = Arc::new(query);
let execution_graph =
plan_query_request(Arc::clone(&query), plan_options, context)
.await
.map_err(|error| Error::NestPlan {
task_display: task_node.task_display.clone(),
command: Str::from(&command_str[add_item_span.clone()]),
error: Box::new(error),
})?;
// An empty execution graph means no tasks matched the query.
// At the top level the session shows the task selector UI,
// but in a nested context there is no UI — propagate as an error.
Expand Down Expand Up @@ -552,17 +572,27 @@ fn plan_spawn_execution(
///
/// Builds a `DiGraph` of task executions, then validates it is acyclic via
/// `ExecutionGraph::try_from_graph`. Returns `CycleDependencyDetected` if a cycle is found.
///
/// **Prune rule:** If the expanding task (the task whose command triggered
/// this nested query) appears in the expansion result, it is pruned from the graph
/// and its predecessors are wired directly to its successors. This prevents
/// workspace root tasks like `"build": "vp run -r build"` from infinitely
/// re-expanding themselves when a different query reaches them (e.g.,
/// `vp run build` produces a different query than the script's `vp run -r build`,
/// so the skip rule doesn't fire, but the prune rule catches root in the result).
/// Like the skip rule, extra args don't affect this — only the `TaskQuery` matters.
#[expect(clippy::future_not_send, reason = "PlanContext contains !Send dyn PlanRequestParser")]
pub async fn plan_query_request(
query_plan_request: QueryPlanRequest,
query: Arc<TaskQuery>,
plan_options: PlanOptions,
mut context: PlanContext<'_>,
) -> Result<ExecutionGraph, Error> {
// Apply cache override from `--cache` / `--no-cache` flags on this request.
//
// When `None`, we skip the update so the context keeps whatever the parent
// resolved — this is how `vp run --cache outer` propagates to a nested
// `vp run inner` that has no flags of its own.
let cache_override = query_plan_request.plan_options.cache_override;
let cache_override = plan_options.cache_override;
if cache_override != CacheOverride::None {
// Override is relative to the *workspace* config, not the parent's
// resolved config. This means `vp run --no-cache outer` where outer
Expand All @@ -574,11 +604,13 @@ pub async fn plan_query_request(
);
context.set_resolved_global_cache(final_cache);
}
context.set_extra_args(Arc::clone(&query_plan_request.plan_options.extra_args));
context.set_extra_args(plan_options.extra_args);
context.set_parent_query(Arc::clone(&query));

// Query matching tasks from the task graph.
// An empty graph means no tasks matched; the caller (session) handles
// empty graphs by showing the task selector.
let task_query_result = context.indexed_task_graph().query_tasks(&query_plan_request.query)?;
let task_query_result = context.indexed_task_graph().query_tasks(&query)?;

#[expect(clippy::print_stderr, reason = "user-facing warning for typos in --filter")]
for selector in &task_query_result.unmatched_selectors {
Expand All @@ -587,6 +619,12 @@ pub async fn plan_query_request(

let task_node_index_graph = task_query_result.execution_graph;

// Prune rule: if the expanding task appears in the expansion, prune it.
// This handles cases like root `"build": "vp run build"` — the root's build
// task is in the result but expanding it would recurse, so we remove it and
// reconnect its predecessors directly to its successors.
let pruned_task = context.expanding_task().filter(|t| task_node_index_graph.contains_node(*t));

let mut execution_node_indices_by_task_index =
FxHashMap::<TaskNodeIndex, ExecutionNodeIndex>::with_capacity_and_hasher(
task_node_index_graph.node_count(),
Expand All @@ -599,23 +637,48 @@ pub async fn plan_query_request(
task_node_index_graph.edge_count(),
);

// Plan each task node as execution nodes
// Plan each task node as execution nodes, skipping the pruned task
for task_index in task_node_index_graph.nodes() {
if Some(task_index) == pruned_task {
continue;
}
let task_execution =
plan_task_as_execution_node(task_index, context.duplicate()).boxed_local().await?;
execution_node_indices_by_task_index
.insert(task_index, inner_graph.add_node(task_execution));
}

// Add edges between execution nodes according to task dependencies
// Add edges between execution nodes according to task dependencies,
// skipping edges involving the pruned task.
for (from_task_index, to_task_index, ()) in task_node_index_graph.all_edges() {
if Some(from_task_index) == pruned_task || Some(to_task_index) == pruned_task {
continue;
}
inner_graph.add_edge(
execution_node_indices_by_task_index[&from_task_index],
execution_node_indices_by_task_index[&to_task_index],
(),
);
}

// Reconnect through the pruned node: wire each predecessor directly to each successor.
if let Some(pruned) = pruned_task {
let preds: Vec<_> =
task_node_index_graph.neighbors_directed(pruned, Direction::Incoming).collect();
let succs: Vec<_> =
task_node_index_graph.neighbors_directed(pruned, Direction::Outgoing).collect();
for &pred in &preds {
for &succ in &succs {
if let (Some(&pe), Some(&se)) = (
execution_node_indices_by_task_index.get(&pred),
execution_node_indices_by_task_index.get(&succ),
) {
inner_graph.add_edge(pe, se, ());
}
}
}
}

// Validate the graph is acyclic.
// `try_from_graph` performs a DFS; if a cycle is found, it returns
// `CycleError` containing the full cycle path as node indices.
Expand Down
Loading