fix(web): default metrics for dataset
@@ -6,7 +6,7 @@ import ConditionsSection from '../components/conditions-section'
 import { useEvaluationStore } from '../store'

 const mockUpload = vi.hoisted(() => vi.fn())
-const mockUseAvailableEvaluationMetrics = vi.hoisted(() => vi.fn())
+const mockUseDatasetEvaluationMetrics = vi.hoisted(() => vi.fn())
 const mockUseDefaultEvaluationMetrics = vi.hoisted(() => vi.fn())
 const mockUseEvaluationConfig = vi.hoisted(() => vi.fn())
 const mockUseSaveEvaluationConfigMutation = vi.hoisted(() => vi.fn())
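Note on the mock setup: vi.mock factory calls are hoisted above imports and top-level declarations at transform time, so a plain const declared in the module body would still be uninitialized when a factory runs; vi.hoisted lifts the spy creation above the factories. A minimal sketch of the pattern, reusing the module path and names from this diff:

import { vi } from 'vitest'

// vi.hoisted executes before the hoisted vi.mock factories,
// so the returned spy is safe to reference inside them.
const mockUseDatasetEvaluationMetrics = vi.hoisted(() => vi.fn())

vi.mock('@/service/use-evaluation', () => ({
  // pass-through wrapper: call arguments stay visible to assertions
  useDatasetEvaluationMetrics: (...args: unknown[]) => mockUseDatasetEvaluationMetrics(...args),
}))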
@@ -51,7 +51,7 @@ vi.mock('@/service/base', () => ({

 vi.mock('@/service/use-evaluation', () => ({
   useEvaluationConfig: (...args: unknown[]) => mockUseEvaluationConfig(...args),
-  useAvailableEvaluationMetrics: (...args: unknown[]) => mockUseAvailableEvaluationMetrics(...args),
+  useDatasetEvaluationMetrics: (...args: unknown[]) => mockUseDatasetEvaluationMetrics(...args),
   useDefaultEvaluationMetrics: (...args: unknown[]) => mockUseDefaultEvaluationMetrics(...args),
   useSaveEvaluationConfigMutation: (...args: unknown[]) => mockUseSaveEvaluationConfigMutation(...args),
   useStartEvaluationRunMutation: (...args: unknown[]) => mockUseStartEvaluationRunMutation(...args),
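Because the factory only wires the exports to the hoisted spies, each test still has to stub the return shape the component expects. Judging from the setup below, that shape is { data: { metrics: string[] } }; a sketch of a per-test stub:

beforeEach(() => {
  // stub the hook result in the shape the component consumes
  mockUseDatasetEvaluationMetrics.mockReturnValue({
    data: { metrics: ['faithfulness', 'context-precision'] },
  })
})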
@@ -119,7 +119,7 @@ describe('Evaluation', () => {
       data: null,
     })

-    mockUseAvailableEvaluationMetrics.mockReturnValue({
+    mockUseDatasetEvaluationMetrics.mockReturnValue({
       data: {
         metrics: ['answer-correctness', 'faithfulness', 'context-precision', 'context-recall', 'context-relevance'],
       },
@@ -582,6 +582,7 @@ describe('Evaluation', () => {
   it('should render the pipeline-specific layout without auto-selecting a judge model', () => {
     renderWithQueryClient(<Evaluation resourceType="datasets" resourceId="dataset-1" />)

+    expect(mockUseDatasetEvaluationMetrics).toHaveBeenCalledWith('dataset-1')
     expect(screen.getByTestId('evaluation-model-selector')).toHaveTextContent('empty')
     expect(screen.getByText('evaluation.history.columns.time')).toBeInTheDocument()
     expect(screen.getByText('Context Precision')).toBeInTheDocument()
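The last assertion expects the human-readable label 'Context Precision' for the id 'context-precision'. buildMetricOption's implementation is not part of this diff; a purely illustrative sketch of the kind of id-to-label mapping that would satisfy the assertion:

// Illustrative only: turn 'context-precision' into 'Context Precision'.
const toLabel = (metricId: string): string =>
  metricId
    .split('-')
    .map(word => word.charAt(0).toUpperCase() + word.slice(1))
    .join(' ')

export const buildMetricOption = (metricId: string) => ({
  value: metricId,
  label: toLabel(metricId),
})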
@@ -7,7 +7,7 @@ import { useEffect, useMemo } from 'react'
 import { useTranslation } from 'react-i18next'
 import { BlockEnum } from '@/app/components/workflow/types'
 import { useDatasetDetailContextWithSelector } from '@/context/dataset-detail'
-import { useAvailableEvaluationMetrics } from '@/service/use-evaluation'
+import { useDatasetEvaluationMetrics } from '@/service/use-evaluation'
 import { usePublishedPipelineInfo } from '@/service/use-pipeline'
 import { useEvaluationResource, useEvaluationStore } from '../../store'
 import { buildMetricOption } from '../metric-selector/utils'
@@ -49,7 +49,7 @@ const PipelineMetricsSection = ({
   const addBuiltinMetric = useEvaluationStore(state => state.addBuiltinMetric)
   const removeMetric = useEvaluationStore(state => state.removeMetric)
   const updateMetricThreshold = useEvaluationStore(state => state.updateMetricThreshold)
-  const { data: availableMetricsData } = useAvailableEvaluationMetrics()
+  const { data: datasetMetricsData } = useDatasetEvaluationMetrics(resourceId)
   const { data: publishedPipeline } = usePublishedPipelineInfo(pipelineId || '')
   const resource = useEvaluationResource(resourceType, resourceId)
   const knowledgeIndexNodeInfoList = useMemo(
@@ -63,12 +63,12 @@ const PipelineMetricsSection = ({
   ), [resource.metrics])
   const availableBuiltinMetrics = useMemo(() => {
     const metricIds = new Set([
-      ...(availableMetricsData?.metrics ?? []),
+      ...(datasetMetricsData?.metrics ?? []),
       ...builtinMetricMap.keys(),
     ])

     return Array.from(metricIds).map(metricId => buildMetricOption(metricId))
-  }, [availableMetricsData?.metrics, builtinMetricMap])
+  }, [datasetMetricsData?.metrics, builtinMetricMap])

   useEffect(() => {
     if (!knowledgeIndexNodeInfoList.length)
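The memo dedupes by Set, which preserves first-insertion order: ids reported for the dataset keep their position ahead of the builtin fallbacks, and duplicates collapse onto the first occurrence. A self-contained illustration of the same merge (the data values are made up):

// Set keeps first-insertion order, so dataset-provided ids
// win the position race over the builtin fallbacks.
const datasetMetrics = ['faithfulness', 'context-precision']
const builtinIds = ['faithfulness', 'answer-correctness']

const merged = Array.from(new Set([...datasetMetrics, ...builtinIds]))
console.log(merged)
// => ['faithfulness', 'context-precision', 'answer-correctness']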
@@ -284,13 +284,6 @@ export const evaluationNodeInfoContract = base
   }>())
   .output(type<EvaluationNodeInfoResponse>())

-export const availableEvaluationMetricsContract = base
-  .route({
-    path: '/evaluation/available-metrics',
-    method: 'GET',
-  })
-  .output(type<EvaluationMetricsListResponse>())
-
 export const availableEvaluationWorkflowsContract = base
   .route({
     path: '/workspaces/current/available-evaluation-workflows',
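The dataset-scoped contract that replaces this one is not shown in the diff; judging from the consoleQuery.datasetEvaluation.metrics call site further down, it plausibly mirrors the removed contract but takes a dataset id. Everything in this sketch, the path included, is an assumption:

export const datasetEvaluationMetricsContract = base
  .route({
    path: '/datasets/:datasetId/evaluation/metrics', // assumed path
    method: 'GET',
  })
  .input(type<{ params: { datasetId: string } }>()) // assumed input shape
  .output(type<EvaluationMetricsListResponse>())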
@@ -3,7 +3,6 @@ import { accountAvatarContract } from './console/account'
 import { appDeleteContract, appWorkflowTypeConvertContract, workflowOnlineUsersContract } from './console/apps'
 import { bindPartnerStackContract, invoicesContract } from './console/billing'
 import {
-  availableEvaluationMetricsContract,
   availableEvaluationWorkflowsContract,
   cancelDatasetEvaluationRunContract,
   cancelEvaluationRunContract,

@@ -148,7 +147,6 @@ export const consoleRouterContract = {
     metrics: evaluationMetricsContract,
     defaultMetrics: evaluationDefaultMetricsContract,
     nodeInfo: evaluationNodeInfoContract,
-    availableMetrics: availableEvaluationMetricsContract,
     availableWorkflows: availableEvaluationWorkflowsContract,
     associatedTargets: evaluationWorkflowAssociatedTargetsContract,
     file: evaluationFileContract,
@@ -54,9 +54,17 @@ export const useEvaluationConfig = (
   return useQuery<EvaluationConfig>(getEvaluationConfigQueryOptions(resourceType, resourceId))
 }

-export const useAvailableEvaluationMetrics = (enabled = true) => {
-  return useQuery(consoleQuery.evaluation.availableMetrics.queryOptions({
-    enabled,
+export const useDatasetEvaluationMetrics = (datasetId: string, enabled = true) => {
+  return useQuery(consoleQuery.datasetEvaluation.metrics.queryOptions({
+    input: datasetId
+      ? {
+          params: {
+            datasetId,
+          },
+        }
+      : skipToken,
+    enabled: !!datasetId && enabled,
     refetchOnWindowFocus: false,
   }))
 }
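skipToken comes from @tanstack/react-query (v5): passing it in place of a real input disables the query while keeping the input type strict, which enabled: false alone does not do. For comparison, the vanilla TanStack Query form of the same guard (fetchMetrics is a hypothetical fetcher):

import { skipToken, useQuery } from '@tanstack/react-query'

declare function fetchMetrics(datasetId: string): Promise<string[]>

export const useMetrics = (datasetId?: string) =>
  useQuery({
    queryKey: ['dataset-metrics', datasetId],
    // with no id, skipToken disables the query entirely,
    // so queryFn never runs with an undefined id
    queryFn: datasetId ? () => fetchMetrics(datasetId) : skipToken,
  })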