author     Jeff Fan <jeff.fan@intel.com>                 2016-06-27 15:41:50 +0800
committer  Michael Kinney <michael.d.kinney@intel.com>   2016-07-14 08:57:41 -0700
commit     8b9311b79557311e137d0ffdc7934fea3966b0d7 (patch)
tree       fec569ad1da16db58cf3d3acd7438fde1fcc86ea /UefiCpuPkg/PiSmmCpuDxeSmm
parent     77d172b76d4c14a667a85014563cc41d254044a4 (diff)
UefiCpuPkg/PiSmmCpuDxeSmm: Remove duplicate aligned buffer on S3 path
InitializeMpSyncData() invokes InitializeSmmCpuSemaphores() to allocate an
aligned buffer for all locks and semaphores. However, InitializeMpSyncData()
is invoked again on the S3 resume path to reset mSmmMpSyncData, which causes
an additional aligned buffer to be allocated.

This update moves the InitializeSmmCpuSemaphores() call into
InitializeMpServiceData(), which is only invoked on normal boot.
InitializeMpSyncData() is updated to only reset the locks/semaphores in
mSmmMpSyncData.

Cc: Michael Kinney <michael.d.kinney@intel.com>
Cc: Feng Tian <feng.tian@intel.com>
Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Jeff Fan <jeff.fan@intel.com>
Reviewed-by: Feng Tian <feng.tian@intel.com>
Reviewed-by: Michael Kinney <michael.d.kinney@intel.com>
Regression-tested-by: Laszlo Ersek <lersek@redhat.com>
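For illustration only, a minimal host-side C model of the call flow this patch
changes; it is not the firmware code. AllocateSemaphoreBlock(), gAllocCount and
the Old/New init functions are invented stand-ins: in the real driver the
allocation happens in InitializeSmmCpuSemaphores() (SMRAM pages) and the reset
happens in InitializeMpSyncData(). Pre-patch, the sync-data init allocates on
every call, so running it again on S3 resume leaves a duplicate buffer;
post-patch, InitializeMpServiceData() allocates once on normal boot and the
sync-data init only resets values.

#include <stdio.h>
#include <stdlib.h>

static unsigned  gAllocCount;      /* number of aligned-buffer allocations        */
static void     *gSemaphoreBlock;  /* stand-in for the SMRAM semaphore pages      */

/* Stand-in for InitializeSmmCpuSemaphores(): allocates the aligned buffer.       */
static void
AllocateSemaphoreBlock (void)
{
  gSemaphoreBlock = malloc (4096);
  gAllocCount++;
}

/* Pre-patch model: the sync-data init allocates on every invocation.             */
static void
InitializeMpSyncDataOld (void)
{
  AllocateSemaphoreBlock ();       /* a second call on S3 resume duplicates it    */
  /* ... assign lock/semaphore pointers into the (new) block ...                  */
}

/* Post-patch model: the sync-data init only resets values in the existing block. */
static void
InitializeMpSyncDataNew (void)
{
  /* ... reset *Counter, *InsideSmm, *AllCpusInSync and the per-CPU pointers ...  */
}

int
main (void)
{
  /* Old flow: normal boot, then S3 resume.                                       */
  gAllocCount = 0;
  InitializeMpSyncDataOld ();      /* normal boot                                 */
  InitializeMpSyncDataOld ();      /* S3 resume: extra buffer, first one is lost  */
  printf ("old flow allocations: %u\n", gAllocCount);   /* prints 2               */

  /* New flow: InitializeMpServiceData() allocates once, on normal boot only.     */
  gAllocCount = 0;
  AllocateSemaphoreBlock ();       /* done from InitializeMpServiceData()         */
  InitializeMpSyncDataNew ();      /* normal boot                                 */
  InitializeMpSyncDataNew ();      /* S3 resume: no new allocation                */
  printf ("new flow allocations: %u\n", gAllocCount);   /* prints 1               */

  return 0;
}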
Diffstat (limited to 'UefiCpuPkg/PiSmmCpuDxeSmm')
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c | 41
1 file changed, 25 insertions(+), 16 deletions(-)
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
index add770ff3..62d037cdb 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
@@ -1205,7 +1205,6 @@ InitializeSmmCpuSemaphores (
VOID
)
{
- UINTN CpuIndex;
UINTN ProcessorCount;
UINTN TotalSize;
UINTN GlobalSemaphoresSize;
@@ -1240,7 +1239,6 @@ InitializeSmmCpuSemaphores (
SemaphoreAddr += SemaphoreSize;
mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
= (SPIN_LOCK *)SemaphoreAddr;
-
SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;
SemaphoreAddr += ProcessorCount * SemaphoreSize;
@@ -1254,21 +1252,9 @@ InitializeSmmCpuSemaphores (
((UINTN)SemaphoreBlock + Pages * SIZE_4KB - SemaphoreAddr) / SemaphoreSize;
ASSERT (mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter >= MSR_SPIN_LOCK_INIT_NUM);
- mSmmMpSyncData->Counter = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
- mSmmMpSyncData->InsideSmm = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
- mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
- for (CpuIndex = 0; CpuIndex < ProcessorCount; CpuIndex ++) {
- mSmmMpSyncData->CpuData[CpuIndex].Busy =
- (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + SemaphoreSize * CpuIndex);
- mSmmMpSyncData->CpuData[CpuIndex].Run =
- (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + SemaphoreSize * CpuIndex);
- mSmmMpSyncData->CpuData[CpuIndex].Present =
- (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + SemaphoreSize * CpuIndex);
- }
-
mSemaphoreSize = SemaphoreSize;
}
@@ -1282,8 +1268,10 @@ InitializeMpSyncData (
VOID
)
{
+ UINTN CpuIndex;
+
if (mSmmMpSyncData != NULL) {
- ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
+ mSmmMpSyncData->SwitchBsp = FALSE;
mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
@@ -1294,7 +1282,23 @@ InitializeMpSyncData (
}
mSmmMpSyncData->EffectiveSyncMode = (SMM_CPU_SYNC_MODE) PcdGet8 (PcdCpuSmmSyncMode);
- InitializeSmmCpuSemaphores ();
+ mSmmMpSyncData->Counter = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
+ mSmmMpSyncData->InsideSmm = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
+ mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
+ ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
+ mSmmMpSyncData->AllCpusInSync != NULL);
+ *mSmmMpSyncData->Counter = 0;
+ *mSmmMpSyncData->InsideSmm = FALSE;
+ *mSmmMpSyncData->AllCpusInSync = FALSE;
+
+ for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex ++) {
+ mSmmMpSyncData->CpuData[CpuIndex].Busy =
+ (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
+ mSmmMpSyncData->CpuData[CpuIndex].Run =
+ (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
+ mSmmMpSyncData->CpuData[CpuIndex].Present =
+ (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
+ }
}
}
@@ -1319,6 +1323,11 @@ InitializeMpServiceData (
UINTN GdtTableStepSize;
//
+ // Allocate memory for all locks and semaphores
+ //
+ InitializeSmmCpuSemaphores ();
+
+ //
// Initialize mSmmMpSyncData
//
mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +