Merge pull request #2957 from halseth/taketa-goroutine-snapshot
[lnd test] write goroutine snapshot on integration test failure
This commit is contained in commit 0193034f4f.
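For context: the mechanism used here is Go's built-in HTTP profiling. An lnd node started with --profile=<port> serves the net/http/pprof handlers, and /debug/pprof/goroutine?debug=1 returns a plain-text stack dump of every goroutine. A minimal, standalone sketch of that server side (the port and program are arbitrary examples, not harness defaults):

package main

import (
    "log"
    "net/http"
    _ "net/http/pprof" // registers the /debug/pprof/* handlers on the default mux
)

func main() {
    // Example port only; the harness below derives each node's profile
    // port from defaultProfilePort instead.
    log.Println(http.ListenAndServe("localhost:9736", nil))
}

With such a server running, fetching http://localhost:9736/debug/pprof/goroutine?debug=1 returns the same page the harness saves to disk in this change.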
.gitignore (vendored):

@@ -33,6 +33,7 @@ _testmain.go
 # Integration test log files
 lntest/itest/output*.log
+lntest/itest/pprof*.log
 lntest/itest/.backendlogs
 lntest/itest/.minerlogs
lntest/harness.go:

@@ -6,6 +6,7 @@ import (
     "fmt"
     "io"
     "io/ioutil"
+    "net/http"
     "os"
     "strings"
     "sync"
@@ -648,6 +649,65 @@ func (n *NetworkHarness) StopNode(node *HarnessNode) error {
     return node.stop()
 }

+// SaveProfilesPages hits profiles pages of all active nodes and writes it to
+// disk using a similar naming scheme as to the regular set of logs.
+func (n *NetworkHarness) SaveProfilesPages() {
+    // Only write goroutine dumps if flag is active.
+    if !(*goroutineDump) {
+        return
+    }
+
+    for _, node := range n.activeNodes {
+        if err := saveProfilesPage(node); err != nil {
+            fmt.Println(err)
+        }
+    }
+}
+
+// saveProfilesPage saves the profiles page for the given node to file.
+func saveProfilesPage(node *HarnessNode) error {
+    resp, err := http.Get(
+        fmt.Sprintf(
+            "http://localhost:%d/debug/pprof/goroutine?debug=1",
+            node.cfg.ProfilePort,
+        ),
+    )
+    if err != nil {
+        return fmt.Errorf("Failed to get profile page "+
+            "(node_id=%d, name=%s): %v\n",
+            node.NodeID, node.cfg.Name, err)
+    }
+    defer resp.Body.Close()
+
+    body, err := ioutil.ReadAll(resp.Body)
+    if err != nil {
+        return fmt.Errorf("Failed to read profile page "+
+            "(node_id=%d, name=%s): %v\n",
+            node.NodeID, node.cfg.Name, err)
+    }
+
+    fileName := fmt.Sprintf(
+        "pprof-%d-%s-%s.log", node.NodeID, node.cfg.Name,
+        hex.EncodeToString(node.PubKey[:logPubKeyBytes]),
+    )
+
+    logFile, err := os.Create(fileName)
+    if err != nil {
+        return fmt.Errorf("Failed to create file for profile page "+
+            "(node_id=%d, name=%s): %v\n",
+            node.NodeID, node.cfg.Name, err)
+    }
+    defer logFile.Close()
+
+    _, err = logFile.Write(body)
+    if err != nil {
+        return fmt.Errorf("Failed to save profile page "+
+            "(node_id=%d, name=%s): %v\n",
+            node.NodeID, node.cfg.Name, err)
+    }
+    return nil
+}
+
 // TODO(roasbeef): add a WithChannel higher-order function?
 // * python-like context manager w.r.t using a channel within a test
 // * possibly adds more funds to the target wallet if the funds are not
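As a side note (not part of this change), the same human-readable listing that saveProfilesPage fetches over HTTP can also be produced in-process with the runtime/pprof package; a minimal sketch:

package main

import (
    "os"
    "runtime/pprof"
)

func main() {
    // Debug level 1 yields the same plain-text stack listing as the
    // /debug/pprof/goroutine?debug=1 endpoint, written here to stdout.
    if err := pprof.Lookup("goroutine").WriteTo(os.Stdout, 1); err != nil {
        panic(err)
    }
}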
lntest/itest/lnd_test.go:

@@ -73,12 +73,16 @@ type harnessTest struct {
     // testCase is populated during test execution and represents the
     // current test case.
     testCase *testCase
+
+    // lndHarness is a reference to the current network harness. Will be
+    // nil if not yet set up.
+    lndHarness *lntest.NetworkHarness
 }

 // newHarnessTest creates a new instance of a harnessTest from a regular
 // testing.T instance.
-func newHarnessTest(t *testing.T) *harnessTest {
-    return &harnessTest{t, nil}
+func newHarnessTest(t *testing.T, net *lntest.NetworkHarness) *harnessTest {
+    return &harnessTest{t, nil, net}
 }

 // Skipf calls the underlying testing.T's Skip method, causing the current test
@@ -91,6 +95,10 @@ func (h *harnessTest) Skipf(format string, args ...interface{}) {
 // integration tests should mark test failures solely with this method due to
 // the error stack traces it produces.
 func (h *harnessTest) Fatalf(format string, a ...interface{}) {
+    if h.lndHarness != nil {
+        h.lndHarness.SaveProfilesPages()
+    }
+
     stacktrace := errors.Wrap(fmt.Sprintf(format, a...), 1).ErrorStack()

     if h.testCase != nil {
@@ -103,8 +111,7 @@ func (h *harnessTest) Fatalf(format string, a ...interface{}) {

 // RunTestCase executes a harness test case. Any errors or panics will be
 // represented as fatal.
-func (h *harnessTest) RunTestCase(testCase *testCase,
-    net *lntest.NetworkHarness) {
+func (h *harnessTest) RunTestCase(testCase *testCase) {

     h.testCase = testCase
     defer func() {
@@ -119,7 +126,7 @@ func (h *harnessTest) RunTestCase(testCase *testCase,
         }
     }()

-    testCase.test(net, h)
+    testCase.test(h.lndHarness, h)

     return
 }
@@ -13513,7 +13520,7 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) {

     for _, testCase := range testCases {
         success := t.t.Run(testCase.name, func(t *testing.T) {
-            h := newHarnessTest(t)
+            h := newHarnessTest(t, net)
             testChanRestoreScenario(h, net, &testCase, password)
         })
         if !success {
@@ -14267,7 +14274,7 @@ var testsCases = []*testCase{
 // TestLightningNetworkDaemon performs a series of integration tests amongst a
 // programmatically driven network of lnd nodes.
 func TestLightningNetworkDaemon(t *testing.T) {
-    ht := newHarnessTest(t)
+    ht := newHarnessTest(t, nil)

     // Declare the network harness here to gain access to its
     // 'OnTxAccepted' call back.
@@ -14391,8 +14398,8 @@ func TestLightningNetworkDaemon(t *testing.T) {
     }

     success := t.Run(testCase.name, func(t1 *testing.T) {
-        ht := newHarnessTest(t1)
-        ht.RunTestCase(testCase, lndHarness)
+        ht := newHarnessTest(t1, lndHarness)
+        ht.RunTestCase(testCase)
     })

     // Stop at the first failure. Mimic behavior of original test
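For comparison only (hypothetical, not part of this diff): in an ordinary Go test, the dump-on-failure idea that Fatalf implements above can be expressed with a deferred check of t.Failed:

package example

import (
    "os"
    "runtime/pprof"
    "testing"
)

// TestDumpOnFailure is a hypothetical illustration: it writes a goroutine
// dump to stderr only if the test has marked itself as failed.
func TestDumpOnFailure(t *testing.T) {
    defer func() {
        if t.Failed() {
            pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
        }
    }()

    // ... test body ...
}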
lntest/node.go:

@@ -33,35 +33,34 @@ import (
     macaroon "gopkg.in/macaroon.v2"
 )

-var (
-    // numActiveNodes is the number of active nodes within the test network.
-    numActiveNodes = 0
-
+const (
     // defaultNodePort is the initial p2p port which will be used by the
     // first created lightning node to listen on for incoming p2p
     // connections. Subsequent allocated ports for future Lightning nodes
     // instances will be monotonically increasing numbers calculated as
-    // such: defaultP2pPort + (3 * harness.nodeNum).
+    // such: defaultP2pPort + (4 * harness.nodeNum).
     defaultNodePort = 19555

     // defaultClientPort is the initial rpc port which will be used by the
     // first created lightning node to listen on for incoming rpc
     // connections. Subsequent allocated ports for future rpc harness
     // instances will be monotonically increasing numbers calculated
-    // as such: defaultP2pPort + (3 * harness.nodeNum).
-    defaultClientPort = 19556
+    // as such: defaultP2pPort + (4 * harness.nodeNum).
+    defaultClientPort = defaultNodePort + 1

     // defaultRestPort is the initial rest port which will be used by the
     // first created lightning node to listen on for incoming rest
     // connections. Subsequent allocated ports for future rpc harness
     // instances will be monotonically increasing numbers calculated
-    // as such: defaultP2pPort + (3 * harness.nodeNum).
-    defaultRestPort = 19557
+    // as such: defaultP2pPort + (4 * harness.nodeNum).
+    defaultRestPort = defaultNodePort + 2

-    // logOutput is a flag that can be set to append the output from the
-    // seed nodes to log files.
-    logOutput = flag.Bool("logoutput", false,
-        "log output from node n to file outputn.log")
+    // defaultProfilePort is the initial port which will be used for
+    // profiling by the first created lightning node. Subsequent allocated
+    // ports for future rpc harness instances will be monotonically
+    // increasing numbers calculated as such:
+    // defaultProfilePort + (4 * harness.nodeNum).
+    defaultProfilePort = defaultNodePort + 3

     // logPubKeyBytes is the number of bytes of the node's PubKey that
     // will be appended to the log file name. The whole PubKey is too
@@ -74,24 +73,33 @@ var (
     trickleDelay = 50
 )

+var (
+    // numActiveNodes is the number of active nodes within the test network.
+    numActiveNodes = 0
+
+    // logOutput is a flag that can be set to append the output from the
+    // seed nodes to log files.
+    logOutput = flag.Bool("logoutput", false,
+        "log output from node n to file output-n.log")
+
+    // goroutineDump is a flag that can be set to dump the active
+    // goroutines of test nodes on failure.
+    goroutineDump = flag.Bool("goroutinedump", false,
+        "write goroutine dump from node n to file pprof-n.log")
+)
+
 // generateListeningPorts returns three ints representing ports to listen on
 // designated for the current lightning network test. If there haven't been any
 // test instances created, the default ports are used. Otherwise, in order to
-// support multiple test nodes running at once, the p2p, rpc, and rest ports
-// are incremented after each initialization.
-func generateListeningPorts() (int, int, int) {
-    var p2p, rpc, rest int
-    if numActiveNodes == 0 {
-        p2p = defaultNodePort
-        rpc = defaultClientPort
-        rest = defaultRestPort
-    } else {
-        p2p = defaultNodePort + (3 * numActiveNodes)
-        rpc = defaultClientPort + (3 * numActiveNodes)
-        rest = defaultRestPort + (3 * numActiveNodes)
-    }
+// support multiple test nodes running at once, the p2p, rpc, rest and
+// profiling ports are incremented after each initialization.
+func generateListeningPorts() (int, int, int, int) {
+    p2p := defaultNodePort + (4 * numActiveNodes)
+    rpc := defaultClientPort + (4 * numActiveNodes)
+    rest := defaultRestPort + (4 * numActiveNodes)
+    profile := defaultProfilePort + (4 * numActiveNodes)

-    return p2p, rpc, rest
+    return p2p, rpc, rest, profile
 }

 // BackendConfig is an interface that abstracts away the specific chain backend
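To make the new port scheme concrete, here is a small worked example (not part of the diff) built from the constants above: every node reserves four consecutive ports, so node n listens on 19555+4n (p2p), 19556+4n (rpc), 19557+4n (rest) and 19558+4n (profile).

package main

import "fmt"

func main() {
    const (
        defaultNodePort    = 19555
        defaultClientPort  = defaultNodePort + 1
        defaultRestPort    = defaultNodePort + 2
        defaultProfilePort = defaultNodePort + 3
    )
    // Prints the ports the first three harness nodes would be assigned.
    for nodeNum := 0; nodeNum < 3; nodeNum++ {
        fmt.Printf("node %d: p2p=%d rpc=%d rest=%d profile=%d\n",
            nodeNum,
            defaultNodePort+4*nodeNum,
            defaultClientPort+4*nodeNum,
            defaultRestPort+4*nodeNum,
            defaultProfilePort+4*nodeNum,
        )
    }
}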
@@ -129,9 +137,10 @@ type nodeConfig struct {
     HasSeed  bool
     Password []byte

-    P2PPort  int
-    RPCPort  int
-    RESTPort int
+    P2PPort     int
+    RPCPort     int
+    RESTPort    int
+    ProfilePort int
 }

 func (cfg nodeConfig) P2PAddr() string {
@@ -195,6 +204,7 @@ func (cfg nodeConfig) genArgs() []string {
     args = append(args, fmt.Sprintf("--readonlymacaroonpath=%v", cfg.ReadMacPath))
     args = append(args, fmt.Sprintf("--invoicemacaroonpath=%v", cfg.InvoiceMacPath))
     args = append(args, fmt.Sprintf("--trickledelay=%v", trickleDelay))
+    args = append(args, fmt.Sprintf("--profile=%d", cfg.ProfilePort))

     if !cfg.HasSeed {
         args = append(args, "--noseedbackup")
@@ -280,7 +290,7 @@ func newNode(cfg nodeConfig) (*HarnessNode, error) {
     cfg.ReadMacPath = filepath.Join(cfg.DataDir, "readonly.macaroon")
     cfg.InvoiceMacPath = filepath.Join(cfg.DataDir, "invoice.macaroon")

-    cfg.P2PPort, cfg.RPCPort, cfg.RESTPort = generateListeningPorts()
+    cfg.P2PPort, cfg.RPCPort, cfg.RESTPort, cfg.ProfilePort = generateListeningPorts()

     nodeNum := numActiveNodes
     numActiveNodes++
@@ -333,7 +343,6 @@ func (hn *HarnessNode) start(lndError chan<- error) error {
     hn.quit = make(chan struct{})

     args := hn.cfg.genArgs()
-    args = append(args, fmt.Sprintf("--profile=%d", 9000+hn.NodeID))
     hn.cmd = exec.Command("../../lnd-itest", args...)

     // Redirect stderr output to buffer
Makefile:

@@ -65,4 +65,4 @@ else
 ITEST_TAGS += btcd
 endif

-ITEST := rm lntest/itest/output*.log; date; $(GOTEST) ./lntest/itest -tags="$(ITEST_TAGS)" $(TEST_FLAGS) -logoutput
+ITEST := rm lntest/itest/*.log; date; $(GOTEST) ./lntest/itest -tags="$(ITEST_TAGS)" $(TEST_FLAGS) -logoutput -goroutinedump